drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
d38ceaf9
AD
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
d38ceaf9
AD
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
4562236b 34#include <drm/drm_atomic_helper.h>
d38ceaf9
AD
35#include <drm/amdgpu_drm.h>
36#include <linux/vgaarb.h>
37#include <linux/vga_switcheroo.h>
38#include <linux/efi.h>
39#include "amdgpu.h"
f4b373f4 40#include "amdgpu_trace.h"
d38ceaf9
AD
41#include "amdgpu_i2c.h"
42#include "atom.h"
43#include "amdgpu_atombios.h"
a5bde2f9 44#include "amdgpu_atomfirmware.h"
d0dd7f0c 45#include "amd_pcie.h"
33f34802
KW
46#ifdef CONFIG_DRM_AMDGPU_SI
47#include "si.h"
48#endif
a2e73f56
AD
49#ifdef CONFIG_DRM_AMDGPU_CIK
50#include "cik.h"
51#endif
aaa36a97 52#include "vi.h"
460826e6 53#include "soc15.h"
d38ceaf9 54#include "bif/bif_4_1_d.h"
9accf2fd 55#include <linux/pci.h>
bec86378 56#include <linux/firmware.h>
89041940 57#include "amdgpu_vf_error.h"
d38ceaf9 58
ba997709 59#include "amdgpu_amdkfd.h"
d2f52ac8 60#include "amdgpu_pm.h"
d38ceaf9 61
e2a75f88 62MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
2d2e5e7e 63MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
e2a75f88 64
2dc80b00
S
65#define AMDGPU_RESUME_MS 2000
66
d38ceaf9
AD
67static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
68static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
4f0955fc 69static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
db95e218 70static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
d38ceaf9
AD
71
72static const char *amdgpu_asic_name[] = {
da69c161
KW
73 "TAHITI",
74 "PITCAIRN",
75 "VERDE",
76 "OLAND",
77 "HAINAN",
d38ceaf9
AD
78 "BONAIRE",
79 "KAVERI",
80 "KABINI",
81 "HAWAII",
82 "MULLINS",
83 "TOPAZ",
84 "TONGA",
48299f95 85 "FIJI",
d38ceaf9 86 "CARRIZO",
139f4917 87 "STONEY",
2cc0c0b5
FC
88 "POLARIS10",
89 "POLARIS11",
c4642a47 90 "POLARIS12",
d4196f01 91 "VEGA10",
2ca8a5d2 92 "RAVEN",
d38ceaf9
AD
93 "LAST",
94};
95
96bool amdgpu_device_is_px(struct drm_device *dev)
97{
98 struct amdgpu_device *adev = dev->dev_private;
99
2f7d10b3 100 if (adev->flags & AMD_IS_PX)
d38ceaf9
AD
101 return true;
102 return false;
103}
104
105/*
106 * MMIO register access helper functions.
107 */
108uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 109 uint32_t acc_flags)
d38ceaf9 110{
f4b373f4
TSD
111 uint32_t ret;
112
43ca8efa 113 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 114 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 115
15d72fd7 116 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 117 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
d38ceaf9
AD
118 else {
119 unsigned long flags;
d38ceaf9
AD
120
121 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
122 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
123 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
124 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 125 }
f4b373f4
TSD
126 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
127 return ret;
d38ceaf9
AD
128}
129
130void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 131 uint32_t acc_flags)
d38ceaf9 132{
f4b373f4 133 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 134
47ed4e1c
KW
135 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
136 adev->last_mm_index = v;
137 }
138
43ca8efa 139 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 140 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 141
15d72fd7 142 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
d38ceaf9
AD
143 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
144 else {
145 unsigned long flags;
146
147 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
148 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
149 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
150 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
151 }
47ed4e1c
KW
152
153 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
154 udelay(500);
155 }
d38ceaf9
AD
156}
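/*
 * Added note (illustrative, not part of the original file): the helpers above
 * use the directly mapped rmmio window when the register offset fits inside
 * it, and otherwise fall back to the indirect MM_INDEX/MM_DATA pair under
 * mmio_idx_lock. Under SR-IOV at runtime, accesses go through the KIQ instead,
 * unless the caller opts out, e.g.:
 *
 *   u32 val = amdgpu_mm_rreg(adev, reg_offset, AMDGPU_REGS_NO_KIQ);
 *
 * where reg_offset is a hypothetical dword register offset.
 */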
157
158u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
159{
160 if ((reg * 4) < adev->rio_mem_size)
161 return ioread32(adev->rio_mem + (reg * 4));
162 else {
163 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
164 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
165 }
166}
167
168void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
169{
47ed4e1c
KW
170 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
171 adev->last_mm_index = v;
172 }
d38ceaf9
AD
173
174 if ((reg * 4) < adev->rio_mem_size)
175 iowrite32(v, adev->rio_mem + (reg * 4));
176 else {
177 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
178 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
179 }
47ed4e1c
KW
180
181 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
182 udelay(500);
183 }
d38ceaf9
AD
184}
185
186/**
187 * amdgpu_mm_rdoorbell - read a doorbell dword
188 *
189 * @adev: amdgpu_device pointer
190 * @index: doorbell index
191 *
192 * Returns the value in the doorbell aperture at the
193 * requested doorbell index (CIK).
194 */
195u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
196{
197 if (index < adev->doorbell.num_doorbells) {
198 return readl(adev->doorbell.ptr + index);
199 } else {
200 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
201 return 0;
202 }
203}
204
205/**
206 * amdgpu_mm_wdoorbell - write a doorbell dword
207 *
208 * @adev: amdgpu_device pointer
209 * @index: doorbell index
210 * @v: value to write
211 *
212 * Writes @v to the doorbell aperture at the
213 * requested doorbell index (CIK).
214 */
215void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
216{
217 if (index < adev->doorbell.num_doorbells) {
218 writel(v, adev->doorbell.ptr + index);
219 } else {
220 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
221 }
222}
223
832be404
KW
224/**
225 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
226 *
227 * @adev: amdgpu_device pointer
228 * @index: doorbell index
229 *
230 * Returns the value in the doorbell aperture at the
231 * requested doorbell index (VEGA10+).
232 */
233u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
234{
235 if (index < adev->doorbell.num_doorbells) {
236 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
237 } else {
238 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
239 return 0;
240 }
241}
242
243/**
244 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
245 *
246 * @adev: amdgpu_device pointer
247 * @index: doorbell index
248 * @v: value to write
249 *
250 * Writes @v to the doorbell aperture at the
251 * requested doorbell index (VEGA10+).
252 */
253void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
254{
255 if (index < adev->doorbell.num_doorbells) {
256 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
257 } else {
258 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
259 }
260}
261
d38ceaf9
AD
262/**
263 * amdgpu_invalid_rreg - dummy reg read function
264 *
265 * @adev: amdgpu device pointer
266 * @reg: offset of register
267 *
268 * Dummy register read function. Used for register blocks
269 * that certain asics don't have (all asics).
270 * Returns the value in the register.
271 */
272static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
273{
274 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
275 BUG();
276 return 0;
277}
278
279/**
280 * amdgpu_invalid_wreg - dummy reg write function
281 *
282 * @adev: amdgpu device pointer
283 * @reg: offset of register
284 * @v: value to write to the register
285 *
286 * Dummy register write function. Used for register blocks
287 * that certain asics don't have (all asics).
288 */
289static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
290{
291 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
292 reg, v);
293 BUG();
294}
295
296/**
297 * amdgpu_block_invalid_rreg - dummy reg read function
298 *
299 * @adev: amdgpu device pointer
300 * @block: offset of instance
301 * @reg: offset of register
302 *
303 * Dummy register read function. Used for register blocks
304 * that certain asics don't have (all asics).
305 * Returns the value in the register.
306 */
307static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
308 uint32_t block, uint32_t reg)
309{
310 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
311 reg, block);
312 BUG();
313 return 0;
314}
315
316/**
317 * amdgpu_block_invalid_wreg - dummy reg write function
318 *
319 * @adev: amdgpu device pointer
320 * @block: offset of instance
321 * @reg: offset of register
322 * @v: value to write to the register
323 *
324 * Dummy register write function. Used for register blocks
325 * that certain asics don't have (all asics).
326 */
327static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
328 uint32_t block,
329 uint32_t reg, uint32_t v)
330{
331 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
332 reg, block, v);
333 BUG();
334}
335
336static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
337{
a4a02777
CK
338 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
339 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
340 &adev->vram_scratch.robj,
341 &adev->vram_scratch.gpu_addr,
342 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
343}
344
345static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
346{
078af1a3 347 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
348}
349
350/**
351 * amdgpu_program_register_sequence - program an array of registers.
352 *
353 * @adev: amdgpu_device pointer
354 * @registers: pointer to the register array
355 * @array_size: size of the register array
356 *
357 * Programs an array of registers with AND and OR masks.
358 * This is a helper for setting golden registers.
359 */
360void amdgpu_program_register_sequence(struct amdgpu_device *adev,
361 const u32 *registers,
362 const u32 array_size)
363{
364 u32 tmp, reg, and_mask, or_mask;
365 int i;
366
367 if (array_size % 3)
368 return;
369
370 for (i = 0; i < array_size; i += 3) {
371 reg = registers[i + 0];
372 and_mask = registers[i + 1];
373 or_mask = registers[i + 2];
374
375 if (and_mask == 0xffffffff) {
376 tmp = or_mask;
377 } else {
378 tmp = RREG32(reg);
379 tmp &= ~and_mask;
380 tmp |= or_mask;
381 }
382 WREG32(reg, tmp);
383 }
384}
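/*
 * Usage sketch (added for illustration, not part of the original file): the
 * register array is consumed as {offset, and_mask, or_mask} triples, so a
 * golden-register table and its call look roughly like:
 *
 *   static const u32 golden_settings_example[] = {
 *           0x31e8, 0x0000ff00, 0x00000400,   (hypothetical offset and masks)
 *   };
 *   amdgpu_program_register_sequence(adev, golden_settings_example,
 *                                    ARRAY_SIZE(golden_settings_example));
 *
 * An and_mask of 0xffffffff writes or_mask directly and skips the
 * read-modify-write.
 */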
385
386void amdgpu_pci_config_reset(struct amdgpu_device *adev)
387{
388 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
389}
390
391/*
392 * GPU doorbell aperture helpers function.
393 */
394/**
395 * amdgpu_doorbell_init - Init doorbell driver information.
396 *
397 * @adev: amdgpu_device pointer
398 *
399 * Init doorbell driver information (CIK)
400 * Returns 0 on success, error on failure.
401 */
402static int amdgpu_doorbell_init(struct amdgpu_device *adev)
403{
705e519e
CK
404 /* No doorbell on SI hardware generation */
405 if (adev->asic_type < CHIP_BONAIRE) {
406 adev->doorbell.base = 0;
407 adev->doorbell.size = 0;
408 adev->doorbell.num_doorbells = 0;
409 adev->doorbell.ptr = NULL;
410 return 0;
411 }
412
d38ceaf9
AD
413 /* doorbell bar mapping */
414 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
415 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
416
edf600da 417 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
418 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
419 if (adev->doorbell.num_doorbells == 0)
420 return -EINVAL;
421
8972e5d2
CK
422 adev->doorbell.ptr = ioremap(adev->doorbell.base,
423 adev->doorbell.num_doorbells *
424 sizeof(u32));
425 if (adev->doorbell.ptr == NULL)
d38ceaf9 426 return -ENOMEM;
d38ceaf9
AD
427
428 return 0;
429}
430
431/**
432 * amdgpu_doorbell_fini - Tear down doorbell driver information.
433 *
434 * @adev: amdgpu_device pointer
435 *
436 * Tear down doorbell driver information (CIK)
437 */
438static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
439{
440 iounmap(adev->doorbell.ptr);
441 adev->doorbell.ptr = NULL;
442}
443
444/**
445 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
446 * setup amdkfd
447 *
448 * @adev: amdgpu_device pointer
449 * @aperture_base: output returning doorbell aperture base physical address
450 * @aperture_size: output returning doorbell aperture size in bytes
451 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
452 *
453 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
454 * takes doorbells required for its own rings and reports the setup to amdkfd.
455 * amdgpu reserved doorbells are at the start of the doorbell aperture.
456 */
457void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
458 phys_addr_t *aperture_base,
459 size_t *aperture_size,
460 size_t *start_offset)
461{
462 /*
463 * The first num_doorbells are used by amdgpu.
464 * amdkfd takes whatever's left in the aperture.
465 */
466 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
467 *aperture_base = adev->doorbell.base;
468 *aperture_size = adev->doorbell.size;
469 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
470 } else {
471 *aperture_base = 0;
472 *aperture_size = 0;
473 *start_offset = 0;
474 }
475}
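/*
 * Added note (illustrative, not part of the original file): the doorbell BAR
 * is split at num_doorbells * sizeof(u32). amdgpu keeps [0, start_offset) for
 * its own rings and amdkfd gets the remainder up to aperture_size. For
 * example, with a 2 MB doorbell BAR and 1024 amdgpu doorbells, amdkfd's
 * doorbells would start at byte offset 4096.
 */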
476
477/*
478 * amdgpu_wb_*()
455a7bc2 479 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 480 * with the status of certain GPU events (fences, ring pointers, etc.).
d38ceaf9
AD
481 */
482
483/**
484 * amdgpu_wb_fini - Disable Writeback and free memory
485 *
486 * @adev: amdgpu_device pointer
487 *
488 * Disables Writeback and frees the Writeback memory (all asics).
489 * Used at driver shutdown.
490 */
491static void amdgpu_wb_fini(struct amdgpu_device *adev)
492{
493 if (adev->wb.wb_obj) {
a76ed485
AD
494 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
495 &adev->wb.gpu_addr,
496 (void **)&adev->wb.wb);
d38ceaf9
AD
497 adev->wb.wb_obj = NULL;
498 }
499}
500
501/**
502 * amdgpu_wb_init- Init Writeback driver info and allocate memory
503 *
504 * @adev: amdgpu_device pointer
505 *
455a7bc2 506 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
507 * Used at driver startup.
508 * Returns 0 on success or a negative error code on failure.
509 */
510static int amdgpu_wb_init(struct amdgpu_device *adev)
511{
512 int r;
513
514 if (adev->wb.wb_obj == NULL) {
97407b63
AD
515 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
516 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
517 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
518 &adev->wb.wb_obj, &adev->wb.gpu_addr,
519 (void **)&adev->wb.wb);
d38ceaf9
AD
520 if (r) {
521 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
522 return r;
523 }
d38ceaf9
AD
524
525 adev->wb.num_wb = AMDGPU_MAX_WB;
526 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
527
528 /* clear wb memory */
74caa461 529 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
d38ceaf9
AD
530 }
531
532 return 0;
533}
534
535/**
536 * amdgpu_wb_get - Allocate a wb entry
537 *
538 * @adev: amdgpu_device pointer
539 * @wb: wb index
540 *
541 * Allocate a wb slot for use by the driver (all asics).
542 * Returns 0 on success or -EINVAL on failure.
543 */
544int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
545{
546 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 547
97407b63 548 if (offset < adev->wb.num_wb) {
7014285a 549 __set_bit(offset, adev->wb.used);
63ae07ca 550 *wb = offset << 3; /* convert to dw offset */
0915fdbc
ML
551 return 0;
552 } else {
553 return -EINVAL;
554 }
555}
556
d38ceaf9
AD
557/**
558 * amdgpu_wb_free - Free a wb entry
559 *
560 * @adev: amdgpu_device pointer
561 * @wb: wb index
562 *
563 * Free a wb slot allocated for use by the driver (all asics)
564 */
565void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
566{
74caa461 567 wb >>= 3;
d38ceaf9 568 if (wb < adev->wb.num_wb)
74caa461 569 __clear_bit(wb, adev->wb.used);
d38ceaf9
AD
570}
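/*
 * Usage sketch (added for illustration, not part of the original file):
 * amdgpu_wb_get() returns the slot as a dword offset (index << 3), so each
 * writeback slot is 8 dwords (256 bits) wide. A typical caller does:
 *
 *   u32 wb;
 *   if (amdgpu_wb_get(adev, &wb) == 0) {
 *           cpu_ptr  = &adev->wb.wb[wb];              (CPU view of the slot)
 *           gpu_addr = adev->wb.gpu_addr + wb * 4;    (GPU address of it)
 *           ...
 *           amdgpu_wb_free(adev, wb);
 *   }
 */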
571
572/**
573 * amdgpu_vram_location - try to find VRAM location
574 * @adev: amdgpu device structure holding all necessary information
575 * @mc: memory controller structure holding memory information
576 * @base: base address at which to put VRAM
577 *
455a7bc2 578 * Function will try to place VRAM at base address provided
d38ceaf9
AD
579 * as parameter (which is so far either PCI aperture address or
580 * for IGP TOM base address).
581 *
582 * If there is not enough space to fit the invisible VRAM in the 32-bit
583 * address space then we limit the VRAM size to the aperture.
584 *
585 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
586 * this shouldn't be a problem as we are using the PCI aperture as a reference.
587 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
588 * not IGP.
589 *
590 * Note: we use mc_vram_size as on some boards we need to program the mc to
591 * cover the whole aperture even if VRAM size is inferior to aperture size
592 * Novell bug 204882 + along with lots of ubuntu ones
593 *
594 * Note: when limiting vram it's safe to overwrite real_vram_size because
595 * we are not in a case where real_vram_size is smaller than mc_vram_size (i.e.,
596 * not affected by bogus hw of Novell bug 204882 along with lots of ubuntu
597 * ones)
598 *
599 * Note: IGP TOM addr should be the same as the aperture addr, we don't
455a7bc2 600 * explicitly check for that though.
d38ceaf9
AD
601 *
602 * FIXME: when reducing VRAM size align new size on power of 2.
603 */
604void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
605{
606 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
607
608 mc->vram_start = base;
609 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
610 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
611 mc->real_vram_size = mc->aper_size;
612 mc->mc_vram_size = mc->aper_size;
613 }
614 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
615 if (limit && limit < mc->real_vram_size)
616 mc->real_vram_size = limit;
617 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
618 mc->mc_vram_size >> 20, mc->vram_start,
619 mc->vram_end, mc->real_vram_size >> 20);
620}
621
622/**
6f02a696 623 * amdgpu_gart_location - try to find GTT location
d38ceaf9
AD
624 * @adev: amdgpu device structure holding all necessary information
625 * @mc: memory controller structure holding memory information
626 *
627 * Function will try to place GTT before or after VRAM.
628 *
629 * If GTT size is bigger than the space left then we adjust GTT size.
630 * Thus this function will never fail.
631 *
632 * FIXME: when reducing GTT size align new size on power of 2.
633 */
6f02a696 634void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
d38ceaf9
AD
635{
636 u64 size_af, size_bf;
637
ed21c047
CK
638 size_af = adev->mc.mc_mask - mc->vram_end;
639 size_bf = mc->vram_start;
d38ceaf9 640 if (size_bf > size_af) {
6f02a696 641 if (mc->gart_size > size_bf) {
d38ceaf9 642 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 643 mc->gart_size = size_bf;
d38ceaf9 644 }
6f02a696 645 mc->gart_start = 0;
d38ceaf9 646 } else {
6f02a696 647 if (mc->gart_size > size_af) {
d38ceaf9 648 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 649 mc->gart_size = size_af;
d38ceaf9 650 }
6f02a696 651 mc->gart_start = mc->vram_end + 1;
d38ceaf9 652 }
6f02a696 653 mc->gart_end = mc->gart_start + mc->gart_size - 1;
d38ceaf9 654 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6f02a696 655 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
d38ceaf9
AD
656}
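/*
 * Added note (illustrative, not part of the original file): taken together,
 * amdgpu_vram_location() and amdgpu_gart_location() build a GPU address map
 * in which GTT occupies whichever gap around VRAM is larger, e.g.:
 *
 *   0x0 .. vram_start - 1         GTT, when the gap below VRAM is larger
 *   vram_start .. vram_end        VRAM (possibly clamped to the aperture)
 *   vram_end + 1 .. gart_end      GTT, when the gap above VRAM is larger
 *
 * gart_size is clamped to the chosen gap, so the placement itself never fails.
 */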
657
a05502e5
HC
658/*
659 * Firmware Reservation functions
660 */
661/**
662 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
663 *
664 * @adev: amdgpu_device pointer
665 *
666 * free fw reserved vram if it has been reserved.
667 */
668void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
669{
670 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
671 NULL, &adev->fw_vram_usage.va);
672}
673
674/**
675 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
676 *
677 * @adev: amdgpu_device pointer
678 *
679 * create bo vram reservation from fw.
680 */
681int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
682{
683 int r = 0;
684 u64 gpu_addr;
685 u64 vram_size = adev->mc.visible_vram_size;
686
687 adev->fw_vram_usage.va = NULL;
688 adev->fw_vram_usage.reserved_bo = NULL;
689
690 if (adev->fw_vram_usage.size > 0 &&
691 adev->fw_vram_usage.size <= vram_size) {
692
693 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
694 PAGE_SIZE, true, 0,
695 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
696 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
697 &adev->fw_vram_usage.reserved_bo);
698 if (r)
699 goto error_create;
700
701 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
702 if (r)
703 goto error_reserve;
704 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
705 AMDGPU_GEM_DOMAIN_VRAM,
706 adev->fw_vram_usage.start_offset,
707 (adev->fw_vram_usage.start_offset +
708 adev->fw_vram_usage.size), &gpu_addr);
709 if (r)
710 goto error_pin;
711 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
712 &adev->fw_vram_usage.va);
713 if (r)
714 goto error_kmap;
715
716 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
717 }
718 return r;
719
720error_kmap:
721 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
722error_pin:
723 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
724error_reserve:
725 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
726error_create:
727 adev->fw_vram_usage.va = NULL;
728 adev->fw_vram_usage.reserved_bo = NULL;
729 return r;
730}
731
732
d38ceaf9
AD
733/*
734 * GPU helpers function.
735 */
736/**
c836fec5 737 * amdgpu_need_post - check if the hw need post or not
d38ceaf9
AD
738 *
739 * @adev: amdgpu_device pointer
740 *
c836fec5
JQ
741 * Check if the asic has been initialized (all asics) at driver startup,
742 * or if post is needed because a hw reset was performed.
743 * Returns true if post is needed, false if not.
d38ceaf9 744 */
c836fec5 745bool amdgpu_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
746{
747 uint32_t reg;
748
bec86378
ML
749 if (amdgpu_sriov_vf(adev))
750 return false;
751
752 if (amdgpu_passthrough(adev)) {
1da2c326
ML
753 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM reboot
754 * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU hangs;
755 * SMC firmware versions above 22.15 don't have this flaw, so we force
756 * vPost to be executed for SMC versions below 22.15
bec86378
ML
757 */
758 if (adev->asic_type == CHIP_FIJI) {
759 int err;
760 uint32_t fw_ver;
761 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
762 /* force vPost if an error occurred */
763 if (err)
764 return true;
765
766 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1da2c326
ML
767 if (fw_ver < 0x00160e00)
768 return true;
bec86378 769 }
bec86378 770 }
91fe77eb 771
772 if (adev->has_hw_reset) {
773 adev->has_hw_reset = false;
774 return true;
775 }
776
777 /* bios scratch used on CIK+ */
778 if (adev->asic_type >= CHIP_BONAIRE)
779 return amdgpu_atombios_scratch_need_asic_init(adev);
780
781 /* check MEM_SIZE for older asics */
782 reg = amdgpu_asic_get_config_memsize(adev);
783
784 if ((reg != 0) && (reg != 0xffffffff))
785 return false;
786
787 return true;
bec86378
ML
788}
789
d38ceaf9
AD
790/**
791 * amdgpu_dummy_page_init - init dummy page used by the driver
792 *
793 * @adev: amdgpu_device pointer
794 *
795 * Allocate the dummy page used by the driver (all asics).
796 * This dummy page is used by the driver as a filler for gart entries
797 * when pages are taken out of the GART
798 * Returns 0 on success, -ENOMEM on failure.
799 */
800int amdgpu_dummy_page_init(struct amdgpu_device *adev)
801{
802 if (adev->dummy_page.page)
803 return 0;
804 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
805 if (adev->dummy_page.page == NULL)
806 return -ENOMEM;
807 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
808 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
809 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
810 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
811 __free_page(adev->dummy_page.page);
812 adev->dummy_page.page = NULL;
813 return -ENOMEM;
814 }
815 return 0;
816}
817
818/**
819 * amdgpu_dummy_page_fini - free dummy page used by the driver
820 *
821 * @adev: amdgpu_device pointer
822 *
823 * Frees the dummy page used by the driver (all asics).
824 */
825void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
826{
827 if (adev->dummy_page.page == NULL)
828 return;
829 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
830 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
831 __free_page(adev->dummy_page.page);
832 adev->dummy_page.page = NULL;
833}
834
835
836/* ATOM accessor methods */
837/*
838 * ATOM is an interpreted byte code stored in tables in the vbios. The
839 * driver registers callbacks to access registers and the interpreter
840 * in the driver parses the tables and executes them to program specific
841 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
842 * atombios.h, and atom.c
843 */
844
845/**
846 * cail_pll_read - read PLL register
847 *
848 * @info: atom card_info pointer
849 * @reg: PLL register offset
850 *
851 * Provides a PLL register accessor for the atom interpreter (r4xx+).
852 * Returns the value of the PLL register.
853 */
854static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
855{
856 return 0;
857}
858
859/**
860 * cail_pll_write - write PLL register
861 *
862 * @info: atom card_info pointer
863 * @reg: PLL register offset
864 * @val: value to write to the pll register
865 *
866 * Provides a PLL register accessor for the atom interpreter (r4xx+).
867 */
868static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
869{
870
871}
872
873/**
874 * cail_mc_read - read MC (Memory Controller) register
875 *
876 * @info: atom card_info pointer
877 * @reg: MC register offset
878 *
879 * Provides an MC register accessor for the atom interpreter (r4xx+).
880 * Returns the value of the MC register.
881 */
882static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
883{
884 return 0;
885}
886
887/**
888 * cail_mc_write - write MC (Memory Controller) register
889 *
890 * @info: atom card_info pointer
891 * @reg: MC register offset
892 * @val: value to write to the pll register
893 *
894 * Provides a MC register accessor for the atom interpreter (r4xx+).
895 */
896static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
897{
898
899}
900
901/**
902 * cail_reg_write - write MMIO register
903 *
904 * @info: atom card_info pointer
905 * @reg: MMIO register offset
906 * @val: value to write to the pll register
907 *
908 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
909 */
910static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
911{
912 struct amdgpu_device *adev = info->dev->dev_private;
913
914 WREG32(reg, val);
915}
916
917/**
918 * cail_reg_read - read MMIO register
919 *
920 * @info: atom card_info pointer
921 * @reg: MMIO register offset
922 *
923 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
924 * Returns the value of the MMIO register.
925 */
926static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
927{
928 struct amdgpu_device *adev = info->dev->dev_private;
929 uint32_t r;
930
931 r = RREG32(reg);
932 return r;
933}
934
935/**
936 * cail_ioreg_write - write IO register
937 *
938 * @info: atom card_info pointer
939 * @reg: IO register offset
940 * @val: value to write to the pll register
941 *
942 * Provides a IO register accessor for the atom interpreter (r4xx+).
943 */
944static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
945{
946 struct amdgpu_device *adev = info->dev->dev_private;
947
948 WREG32_IO(reg, val);
949}
950
951/**
952 * cail_ioreg_read - read IO register
953 *
954 * @info: atom card_info pointer
955 * @reg: IO register offset
956 *
957 * Provides an IO register accessor for the atom interpreter (r4xx+).
958 * Returns the value of the IO register.
959 */
960static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
961{
962 struct amdgpu_device *adev = info->dev->dev_private;
963 uint32_t r;
964
965 r = RREG32_IO(reg);
966 return r;
967}
968
5b41d94c
KR
969static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
970 struct device_attribute *attr,
971 char *buf)
972{
973 struct drm_device *ddev = dev_get_drvdata(dev);
974 struct amdgpu_device *adev = ddev->dev_private;
975 struct atom_context *ctx = adev->mode_info.atom_context;
976
977 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
978}
979
980static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
981 NULL);
982
d38ceaf9
AD
983/**
984 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
985 *
986 * @adev: amdgpu_device pointer
987 *
988 * Frees the driver info and register access callbacks for the ATOM
989 * interpreter (r4xx+).
990 * Called at driver shutdown.
991 */
992static void amdgpu_atombios_fini(struct amdgpu_device *adev)
993{
89e0ec9f 994 if (adev->mode_info.atom_context) {
d38ceaf9 995 kfree(adev->mode_info.atom_context->scratch);
89e0ec9f
ML
996 kfree(adev->mode_info.atom_context->iio);
997 }
d38ceaf9
AD
998 kfree(adev->mode_info.atom_context);
999 adev->mode_info.atom_context = NULL;
1000 kfree(adev->mode_info.atom_card_info);
1001 adev->mode_info.atom_card_info = NULL;
5b41d94c 1002 device_remove_file(adev->dev, &dev_attr_vbios_version);
d38ceaf9
AD
1003}
1004
1005/**
1006 * amdgpu_atombios_init - init the driver info and callbacks for atombios
1007 *
1008 * @adev: amdgpu_device pointer
1009 *
1010 * Initializes the driver info and register access callbacks for the
1011 * ATOM interpreter (r4xx+).
1012 * Returns 0 on success, -ENOMEM on failure.
1013 * Called at driver startup.
1014 */
1015static int amdgpu_atombios_init(struct amdgpu_device *adev)
1016{
1017 struct card_info *atom_card_info =
1018 kzalloc(sizeof(struct card_info), GFP_KERNEL);
5b41d94c 1019 int ret;
d38ceaf9
AD
1020
1021 if (!atom_card_info)
1022 return -ENOMEM;
1023
1024 adev->mode_info.atom_card_info = atom_card_info;
1025 atom_card_info->dev = adev->ddev;
1026 atom_card_info->reg_read = cail_reg_read;
1027 atom_card_info->reg_write = cail_reg_write;
1028 /* needed for iio ops */
1029 if (adev->rio_mem) {
1030 atom_card_info->ioreg_read = cail_ioreg_read;
1031 atom_card_info->ioreg_write = cail_ioreg_write;
1032 } else {
b64a18c5 1033 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
d38ceaf9
AD
1034 atom_card_info->ioreg_read = cail_reg_read;
1035 atom_card_info->ioreg_write = cail_reg_write;
1036 }
1037 atom_card_info->mc_read = cail_mc_read;
1038 atom_card_info->mc_write = cail_mc_write;
1039 atom_card_info->pll_read = cail_pll_read;
1040 atom_card_info->pll_write = cail_pll_write;
1041
1042 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
1043 if (!adev->mode_info.atom_context) {
1044 amdgpu_atombios_fini(adev);
1045 return -ENOMEM;
1046 }
1047
1048 mutex_init(&adev->mode_info.atom_context->mutex);
a5bde2f9
AD
1049 if (adev->is_atom_fw) {
1050 amdgpu_atomfirmware_scratch_regs_init(adev);
1051 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1052 } else {
1053 amdgpu_atombios_scratch_regs_init(adev);
1054 amdgpu_atombios_allocate_fb_scratch(adev);
1055 }
5b41d94c
KR
1056
1057 ret = device_create_file(adev->dev, &dev_attr_vbios_version);
1058 if (ret) {
1059 DRM_ERROR("Failed to create device file for VBIOS version\n");
1060 return ret;
1061 }
1062
d38ceaf9
AD
1063 return 0;
1064}
1065
1066/* if we get transitioned to only one device, take VGA back */
1067/**
1068 * amdgpu_vga_set_decode - enable/disable vga decode
1069 *
1070 * @cookie: amdgpu_device pointer
1071 * @state: enable/disable vga decode
1072 *
1073 * Enable/disable vga decode (all asics).
1074 * Returns VGA resource flags.
1075 */
1076static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1077{
1078 struct amdgpu_device *adev = cookie;
1079 amdgpu_asic_set_vga_state(adev, state);
1080 if (state)
1081 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1082 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1083 else
1084 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1085}
1086
bab4fee7 1087static void amdgpu_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
1088{
1089 /* defines number of bits in page table versus page directory,
1090 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1091 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
1092 if (amdgpu_vm_block_size == -1)
1093 return;
a1adf8be 1094
bab4fee7 1095 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
1096 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1097 amdgpu_vm_block_size);
bab4fee7 1098 goto def_value;
a1adf8be
CZ
1099 }
1100
1101 if (amdgpu_vm_block_size > 24 ||
1102 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1103 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1104 amdgpu_vm_block_size);
bab4fee7 1105 goto def_value;
a1adf8be 1106 }
bab4fee7
JZ
1107
1108 return;
1109
1110def_value:
1111 amdgpu_vm_block_size = -1;
a1adf8be
CZ
1112}
1113
83ca145d
ZJ
1114static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1115{
64dab074
AD
1116 /* no need to check the default value */
1117 if (amdgpu_vm_size == -1)
1118 return;
1119
76117507 1120 if (!is_power_of_2(amdgpu_vm_size)) {
83ca145d
ZJ
1121 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1122 amdgpu_vm_size);
1123 goto def_value;
1124 }
1125
1126 if (amdgpu_vm_size < 1) {
1127 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1128 amdgpu_vm_size);
1129 goto def_value;
1130 }
1131
1132 /*
1133 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
1134 */
1135 if (amdgpu_vm_size > 1024) {
1136 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1137 amdgpu_vm_size);
1138 goto def_value;
1139 }
1140
1141 return;
1142
1143def_value:
bab4fee7 1144 amdgpu_vm_size = -1;
83ca145d
ZJ
1145}
1146
d38ceaf9
AD
1147/**
1148 * amdgpu_check_arguments - validate module params
1149 *
1150 * @adev: amdgpu_device pointer
1151 *
1152 * Validates certain module parameters and updates
1153 * the associated values used by the driver (all asics).
1154 */
1155static void amdgpu_check_arguments(struct amdgpu_device *adev)
1156{
5b011235
CZ
1157 if (amdgpu_sched_jobs < 4) {
1158 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1159 amdgpu_sched_jobs);
1160 amdgpu_sched_jobs = 4;
76117507 1161 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
5b011235
CZ
1162 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1163 amdgpu_sched_jobs);
1164 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1165 }
d38ceaf9 1166
83e74db6 1167 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
1168 /* gart size must be greater than or equal to 32M */
1169 dev_warn(adev->dev, "gart size (%d) too small\n",
1170 amdgpu_gart_size);
83e74db6 1171 amdgpu_gart_size = -1;
d38ceaf9
AD
1172 }
1173
36d38372 1174 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1175 /* gtt size must be greater than or equal to 32M */
36d38372
CK
1176 dev_warn(adev->dev, "gtt size (%d) too small\n",
1177 amdgpu_gtt_size);
1178 amdgpu_gtt_size = -1;
d38ceaf9
AD
1179 }
1180
d07f14be
RH
1181 /* valid range is between 4 and 9 inclusive */
1182 if (amdgpu_vm_fragment_size != -1 &&
1183 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1184 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1185 amdgpu_vm_fragment_size = -1;
1186 }
1187
83ca145d 1188 amdgpu_check_vm_size(adev);
d38ceaf9 1189
bab4fee7 1190 amdgpu_check_block_size(adev);
6a7f76e7 1191
526bae37 1192 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 1193 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
1194 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1195 amdgpu_vram_page_split);
1196 amdgpu_vram_page_split = 1024;
1197 }
d38ceaf9
AD
1198}
1199
1200/**
1201 * amdgpu_switcheroo_set_state - set switcheroo state
1202 *
1203 * @pdev: pci dev pointer
1694467b 1204 * @state: vga_switcheroo state
d38ceaf9
AD
1205 *
1206 * Callback for the switcheroo driver. Suspends or resumes
1207 * the asic before or after it is powered up using ACPI methods.
1208 */
1209static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1210{
1211 struct drm_device *dev = pci_get_drvdata(pdev);
1212
1213 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1214 return;
1215
1216 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1217 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
1218 /* don't suspend or resume card normally */
1219 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1220
810ddc3a 1221 amdgpu_device_resume(dev, true, true);
d38ceaf9 1222
d38ceaf9
AD
1223 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1224 drm_kms_helper_poll_enable(dev);
1225 } else {
7ca85295 1226 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
1227 drm_kms_helper_poll_disable(dev);
1228 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1229 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1230 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1231 }
1232}
1233
1234/**
1235 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1236 *
1237 * @pdev: pci dev pointer
1238 *
1239 * Callback for the switcheroo driver. Checks if the switcheroo
1240 * state can be changed.
1241 * Returns true if the state can be changed, false if not.
1242 */
1243static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1244{
1245 struct drm_device *dev = pci_get_drvdata(pdev);
1246
1247 /*
1248 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1249 * locking inversion with the driver load path. And the access here is
1250 * completely racy anyway. So don't bother with locking for now.
1251 */
1252 return dev->open_count == 0;
1253}
1254
1255static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1256 .set_gpu_state = amdgpu_switcheroo_set_state,
1257 .reprobe = NULL,
1258 .can_switch = amdgpu_switcheroo_can_switch,
1259};
1260
1261int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1262 enum amd_ip_block_type block_type,
1263 enum amd_clockgating_state state)
d38ceaf9
AD
1264{
1265 int i, r = 0;
1266
1267 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1268 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1269 continue;
c722865a
RZ
1270 if (adev->ip_blocks[i].version->type != block_type)
1271 continue;
1272 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1273 continue;
1274 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1275 (void *)adev, state);
1276 if (r)
1277 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1278 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1279 }
1280 return r;
1281}
1282
1283int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1284 enum amd_ip_block_type block_type,
1285 enum amd_powergating_state state)
d38ceaf9
AD
1286{
1287 int i, r = 0;
1288
1289 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1290 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1291 continue;
c722865a
RZ
1292 if (adev->ip_blocks[i].version->type != block_type)
1293 continue;
1294 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1295 continue;
1296 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1297 (void *)adev, state);
1298 if (r)
1299 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1300 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1301 }
1302 return r;
1303}
1304
6cb2d4e4
HR
1305void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1306{
1307 int i;
1308
1309 for (i = 0; i < adev->num_ip_blocks; i++) {
1310 if (!adev->ip_blocks[i].status.valid)
1311 continue;
1312 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1313 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1314 }
1315}
1316
5dbbb60b
AD
1317int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1318 enum amd_ip_block_type block_type)
1319{
1320 int i, r;
1321
1322 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1323 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1324 continue;
a1255107
AD
1325 if (adev->ip_blocks[i].version->type == block_type) {
1326 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1327 if (r)
1328 return r;
1329 break;
1330 }
1331 }
1332 return 0;
1333
1334}
1335
1336bool amdgpu_is_idle(struct amdgpu_device *adev,
1337 enum amd_ip_block_type block_type)
1338{
1339 int i;
1340
1341 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1342 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1343 continue;
a1255107
AD
1344 if (adev->ip_blocks[i].version->type == block_type)
1345 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1346 }
1347 return true;
1348
1349}
1350
a1255107
AD
1351struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1352 enum amd_ip_block_type type)
d38ceaf9
AD
1353{
1354 int i;
1355
1356 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1357 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1358 return &adev->ip_blocks[i];
1359
1360 return NULL;
1361}
1362
1363/**
1364 * amdgpu_ip_block_version_cmp
1365 *
1366 * @adev: amdgpu_device pointer
5fc3aeeb 1367 * @type: enum amd_ip_block_type
d38ceaf9
AD
1368 * @major: major version
1369 * @minor: minor version
1370 *
1371 * return 0 if equal or greater
1372 * return 1 if smaller or the ip_block doesn't exist
1373 */
1374int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1375 enum amd_ip_block_type type,
d38ceaf9
AD
1376 u32 major, u32 minor)
1377{
a1255107 1378 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1379
a1255107
AD
1380 if (ip_block && ((ip_block->version->major > major) ||
1381 ((ip_block->version->major == major) &&
1382 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1383 return 0;
1384
1385 return 1;
1386}
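/*
 * Usage sketch (added for illustration, not part of the original file):
 *
 *   if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 1) == 0)
 *           the SMC IP block is present and at least version 7.1
 *
 * The block type and version numbers above are examples only. A return of 0
 * means "equal or newer", 1 means "older or no such IP block".
 */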
1387
a1255107
AD
1388/**
1389 * amdgpu_ip_block_add
1390 *
1391 * @adev: amdgpu_device pointer
1392 * @ip_block_version: pointer to the IP to add
1393 *
1394 * Adds the IP block driver information to the collection of IPs
1395 * on the asic.
1396 */
1397int amdgpu_ip_block_add(struct amdgpu_device *adev,
1398 const struct amdgpu_ip_block_version *ip_block_version)
1399{
1400 if (!ip_block_version)
1401 return -EINVAL;
1402
a0bae357
HR
1403 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1404 ip_block_version->funcs->name);
1405
a1255107
AD
1406 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1407
1408 return 0;
1409}
1410
483ef985 1411static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1412{
1413 adev->enable_virtual_display = false;
1414
1415 if (amdgpu_virtual_display) {
1416 struct drm_device *ddev = adev->ddev;
1417 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1418 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1419
1420 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1421 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1422 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1423 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1424 if (!strcmp("all", pciaddname)
1425 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1426 long num_crtc;
1427 int res = -1;
1428
9accf2fd 1429 adev->enable_virtual_display = true;
0f66356d
ED
1430
1431 if (pciaddname_tmp)
1432 res = kstrtol(pciaddname_tmp, 10,
1433 &num_crtc);
1434
1435 if (!res) {
1436 if (num_crtc < 1)
1437 num_crtc = 1;
1438 if (num_crtc > 6)
1439 num_crtc = 6;
1440 adev->mode_info.num_crtc = num_crtc;
1441 } else {
1442 adev->mode_info.num_crtc = 1;
1443 }
9accf2fd
ED
1444 break;
1445 }
1446 }
1447
0f66356d
ED
1448 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1449 amdgpu_virtual_display, pci_address_name,
1450 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1451
1452 kfree(pciaddstr);
1453 }
1454}
1455
e2a75f88
AD
1456static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1457{
e2a75f88
AD
1458 const char *chip_name;
1459 char fw_name[30];
1460 int err;
1461 const struct gpu_info_firmware_header_v1_0 *hdr;
1462
ab4fe3e1
HR
1463 adev->firmware.gpu_info_fw = NULL;
1464
e2a75f88
AD
1465 switch (adev->asic_type) {
1466 case CHIP_TOPAZ:
1467 case CHIP_TONGA:
1468 case CHIP_FIJI:
1469 case CHIP_POLARIS11:
1470 case CHIP_POLARIS10:
1471 case CHIP_POLARIS12:
1472 case CHIP_CARRIZO:
1473 case CHIP_STONEY:
1474#ifdef CONFIG_DRM_AMDGPU_SI
1475 case CHIP_VERDE:
1476 case CHIP_TAHITI:
1477 case CHIP_PITCAIRN:
1478 case CHIP_OLAND:
1479 case CHIP_HAINAN:
1480#endif
1481#ifdef CONFIG_DRM_AMDGPU_CIK
1482 case CHIP_BONAIRE:
1483 case CHIP_HAWAII:
1484 case CHIP_KAVERI:
1485 case CHIP_KABINI:
1486 case CHIP_MULLINS:
1487#endif
1488 default:
1489 return 0;
1490 case CHIP_VEGA10:
1491 chip_name = "vega10";
1492 break;
2d2e5e7e
AD
1493 case CHIP_RAVEN:
1494 chip_name = "raven";
1495 break;
e2a75f88
AD
1496 }
1497
1498 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1499 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1500 if (err) {
1501 dev_err(adev->dev,
1502 "Failed to load gpu_info firmware \"%s\"\n",
1503 fw_name);
1504 goto out;
1505 }
ab4fe3e1 1506 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1507 if (err) {
1508 dev_err(adev->dev,
1509 "Failed to validate gpu_info firmware \"%s\"\n",
1510 fw_name);
1511 goto out;
1512 }
1513
ab4fe3e1 1514 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1515 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1516
1517 switch (hdr->version_major) {
1518 case 1:
1519 {
1520 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1521 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1522 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1523
b5ab16bf
AD
1524 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1525 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1526 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1527 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1528 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1529 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1530 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1531 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1532 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1533 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1534 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1535 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1536 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1537 adev->gfx.cu_info.max_waves_per_simd =
1538 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1539 adev->gfx.cu_info.max_scratch_slots_per_cu =
1540 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1541 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1542 break;
1543 }
1544 default:
1545 dev_err(adev->dev,
1546 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1547 err = -EINVAL;
1548 goto out;
1549 }
1550out:
e2a75f88
AD
1551 return err;
1552}
1553
d38ceaf9
AD
1554static int amdgpu_early_init(struct amdgpu_device *adev)
1555{
aaa36a97 1556 int i, r;
d38ceaf9 1557
483ef985 1558 amdgpu_device_enable_virtual_display(adev);
a6be7570 1559
d38ceaf9 1560 switch (adev->asic_type) {
aaa36a97
AD
1561 case CHIP_TOPAZ:
1562 case CHIP_TONGA:
48299f95 1563 case CHIP_FIJI:
2cc0c0b5
FC
1564 case CHIP_POLARIS11:
1565 case CHIP_POLARIS10:
c4642a47 1566 case CHIP_POLARIS12:
aaa36a97 1567 case CHIP_CARRIZO:
39bb0c92
SL
1568 case CHIP_STONEY:
1569 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1570 adev->family = AMDGPU_FAMILY_CZ;
1571 else
1572 adev->family = AMDGPU_FAMILY_VI;
1573
1574 r = vi_set_ip_blocks(adev);
1575 if (r)
1576 return r;
1577 break;
33f34802
KW
1578#ifdef CONFIG_DRM_AMDGPU_SI
1579 case CHIP_VERDE:
1580 case CHIP_TAHITI:
1581 case CHIP_PITCAIRN:
1582 case CHIP_OLAND:
1583 case CHIP_HAINAN:
295d0daf 1584 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1585 r = si_set_ip_blocks(adev);
1586 if (r)
1587 return r;
1588 break;
1589#endif
a2e73f56
AD
1590#ifdef CONFIG_DRM_AMDGPU_CIK
1591 case CHIP_BONAIRE:
1592 case CHIP_HAWAII:
1593 case CHIP_KAVERI:
1594 case CHIP_KABINI:
1595 case CHIP_MULLINS:
1596 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1597 adev->family = AMDGPU_FAMILY_CI;
1598 else
1599 adev->family = AMDGPU_FAMILY_KV;
1600
1601 r = cik_set_ip_blocks(adev);
1602 if (r)
1603 return r;
1604 break;
1605#endif
2ca8a5d2
CZ
1606 case CHIP_VEGA10:
1607 case CHIP_RAVEN:
1608 if (adev->asic_type == CHIP_RAVEN)
1609 adev->family = AMDGPU_FAMILY_RV;
1610 else
1611 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1612
1613 r = soc15_set_ip_blocks(adev);
1614 if (r)
1615 return r;
1616 break;
d38ceaf9
AD
1617 default:
1618 /* FIXME: not supported yet */
1619 return -EINVAL;
1620 }
1621
e2a75f88
AD
1622 r = amdgpu_device_parse_gpu_info_fw(adev);
1623 if (r)
1624 return r;
1625
3149d9da
XY
1626 if (amdgpu_sriov_vf(adev)) {
1627 r = amdgpu_virt_request_full_gpu(adev, true);
1628 if (r)
1629 return r;
1630 }
1631
d38ceaf9
AD
1632 for (i = 0; i < adev->num_ip_blocks; i++) {
1633 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1634 DRM_ERROR("disabled ip block: %d <%s>\n",
1635 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1636 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1637 } else {
a1255107
AD
1638 if (adev->ip_blocks[i].version->funcs->early_init) {
1639 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1640 if (r == -ENOENT) {
a1255107 1641 adev->ip_blocks[i].status.valid = false;
2c1a2784 1642 } else if (r) {
a1255107
AD
1643 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1644 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1645 return r;
2c1a2784 1646 } else {
a1255107 1647 adev->ip_blocks[i].status.valid = true;
2c1a2784 1648 }
974e6b64 1649 } else {
a1255107 1650 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1651 }
d38ceaf9
AD
1652 }
1653 }
1654
395d1fb9
NH
1655 adev->cg_flags &= amdgpu_cg_mask;
1656 adev->pg_flags &= amdgpu_pg_mask;
1657
d38ceaf9
AD
1658 return 0;
1659}
1660
1661static int amdgpu_init(struct amdgpu_device *adev)
1662{
1663 int i, r;
1664
1665 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1666 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1667 continue;
a1255107 1668 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1669 if (r) {
a1255107
AD
1670 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1671 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1672 return r;
2c1a2784 1673 }
a1255107 1674 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1675 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1676 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1677 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1678 if (r) {
1679 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1680 return r;
2c1a2784 1681 }
a1255107 1682 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1683 if (r) {
1684 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1685 return r;
2c1a2784 1686 }
d38ceaf9 1687 r = amdgpu_wb_init(adev);
2c1a2784
AD
1688 if (r) {
1689 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1690 return r;
2c1a2784 1691 }
a1255107 1692 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1693
1694 /* right after GMC hw init, we create CSA */
1695 if (amdgpu_sriov_vf(adev)) {
1696 r = amdgpu_allocate_static_csa(adev);
1697 if (r) {
1698 DRM_ERROR("allocate CSA failed %d\n", r);
1699 return r;
1700 }
1701 }
d38ceaf9
AD
1702 }
1703 }
1704
1705 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1706 if (!adev->ip_blocks[i].status.sw)
d38ceaf9
AD
1707 continue;
1708 /* gmc hw init is done early */
a1255107 1709 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1710 continue;
a1255107 1711 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1712 if (r) {
a1255107
AD
1713 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1714 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1715 return r;
2c1a2784 1716 }
a1255107 1717 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1718 }
1719
1720 return 0;
1721}
1722
0c49e0b8
CZ
1723static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1724{
1725 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1726}
1727
1728static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1729{
1730 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1731 AMDGPU_RESET_MAGIC_NUM);
1732}
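/*
 * Added note (illustrative, not part of the original file): the "reset magic"
 * is a snapshot of the first AMDGPU_RESET_MAGIC_NUM bytes of the GART table
 * (adev->gart.ptr), taken during late init. After a GPU reset,
 * amdgpu_check_vram_lost() compares the live contents against that snapshot;
 * a mismatch indicates that VRAM contents were lost across the reset.
 */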
1733
2dc80b00 1734static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1735{
1736 int i = 0, r;
1737
1738 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1739 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1740 continue;
4a446d55 1741 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1742 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1743 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1744 /* enable clockgating to save power */
a1255107
AD
1745 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1746 AMD_CG_STATE_GATE);
4a446d55
AD
1747 if (r) {
1748 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1749 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1750 return r;
1751 }
b0b00ff1 1752 }
d38ceaf9 1753 }
2dc80b00
S
1754 return 0;
1755}
1756
1757static int amdgpu_late_init(struct amdgpu_device *adev)
1758{
1759 int i = 0, r;
1760
1761 for (i = 0; i < adev->num_ip_blocks; i++) {
1762 if (!adev->ip_blocks[i].status.valid)
1763 continue;
1764 if (adev->ip_blocks[i].version->funcs->late_init) {
1765 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1766 if (r) {
1767 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1768 adev->ip_blocks[i].version->funcs->name, r);
1769 return r;
1770 }
1771 adev->ip_blocks[i].status.late_initialized = true;
1772 }
1773 }
1774
1775 mod_delayed_work(system_wq, &adev->late_init_work,
1776 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1777
0c49e0b8 1778 amdgpu_fill_reset_magic(adev);
d38ceaf9
AD
1779
1780 return 0;
1781}
1782
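/* tear down all IP blocks: disable the SMC first, then run hw_fini, sw_fini
 * and late_fini in reverse order; on SR-IOV the full GPU is handed back */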
1783static int amdgpu_fini(struct amdgpu_device *adev)
1784{
1785 int i, r;
1786
3e96dbfd
AD
1787 /* need to disable SMC first */
1788 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1789 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1790 continue;
a1255107 1791 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1792 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1793 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1794 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1795 if (r) {
1796 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1797 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1798 return r;
1799 }
a1255107 1800 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1801 /* XXX handle errors */
1802 if (r) {
1803 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1804 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1805 }
a1255107 1806 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1807 break;
1808 }
1809 }
1810
d38ceaf9 1811 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1812 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1813 continue;
a1255107 1814 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1815 amdgpu_wb_fini(adev);
1816 amdgpu_vram_scratch_fini(adev);
1817 }
8201a67a
RZ
1818
1819 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1820 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1821 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1822 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1823 AMD_CG_STATE_UNGATE);
1824 if (r) {
1825 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1826 adev->ip_blocks[i].version->funcs->name, r);
1827 return r;
1828 }
2c1a2784 1829 }
8201a67a 1830
a1255107 1831 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1832 /* XXX handle errors */
2c1a2784 1833 if (r) {
a1255107
AD
1834 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1835 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1836 }
8201a67a 1837
a1255107 1838 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1839 }
1840
1841 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1842 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1843 continue;
a1255107 1844 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1845 /* XXX handle errors */
2c1a2784 1846 if (r) {
a1255107
AD
1847 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1848 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1849 }
a1255107
AD
1850 adev->ip_blocks[i].status.sw = false;
1851 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1852 }
1853
a6dcfd9c 1854 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1855 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1856 continue;
a1255107
AD
1857 if (adev->ip_blocks[i].version->funcs->late_fini)
1858 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1859 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1860 }
1861
030308fc 1862 if (amdgpu_sriov_vf(adev))
3149d9da 1863 amdgpu_virt_release_full_gpu(adev, false);
2493664f 1864
d38ceaf9
AD
1865 return 0;
1866}
1867
2dc80b00
S
1868static void amdgpu_late_init_func_handler(struct work_struct *work)
1869{
1870 struct amdgpu_device *adev =
1871 container_of(work, struct amdgpu_device, late_init_work.work);
1872 amdgpu_late_set_cg_state(adev);
1873}
1874
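/* suspend all IP blocks in reverse order, ungating the SMC and each block
 * first so they can be shut down safely; also used on the full-reset path */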
faefba95 1875int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1876{
1877 int i, r;
1878
e941ea99
XY
1879 if (amdgpu_sriov_vf(adev))
1880 amdgpu_virt_request_full_gpu(adev, false);
1881
c5a93a28
FC
1882 /* ungate SMC block first */
1883 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1884 AMD_CG_STATE_UNGATE);
1885 if (r) {
 1886 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1887 }
1888
d38ceaf9 1889 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1890 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1891 continue;
1892 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1893 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1894 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1895 AMD_CG_STATE_UNGATE);
c5a93a28 1896 if (r) {
a1255107
AD
1897 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1898 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1899 }
2c1a2784 1900 }
d38ceaf9 1901 /* XXX handle errors */
a1255107 1902 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1903 /* XXX handle errors */
2c1a2784 1904 if (r) {
a1255107
AD
1905 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1906 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1907 }
d38ceaf9
AD
1908 }
1909
e941ea99
XY
1910 if (amdgpu_sriov_vf(adev))
1911 amdgpu_virt_release_full_gpu(adev, false);
1912
d38ceaf9
AD
1913 return 0;
1914}
1915
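/* SR-IOV reset helper: re-run hw_init for GMC, COMMON and IH in that fixed
 * order before the remaining blocks are brought back */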
e4f0fdcc 1916static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1917{
1918 int i, r;
1919
2cb681b6
ML
1920 static enum amd_ip_block_type ip_order[] = {
1921 AMD_IP_BLOCK_TYPE_GMC,
1922 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1923 AMD_IP_BLOCK_TYPE_IH,
1924 };
a90ad3c2 1925
2cb681b6
ML
1926 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1927 int j;
1928 struct amdgpu_ip_block *block;
a90ad3c2 1929
2cb681b6
ML
1930 for (j = 0; j < adev->num_ip_blocks; j++) {
1931 block = &adev->ip_blocks[j];
1932
1933 if (block->version->type != ip_order[i] ||
1934 !block->status.valid)
1935 continue;
1936
1937 r = block->version->funcs->hw_init(adev);
 1938 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1939 }
1940 }
1941
1942 return 0;
1943}
1944
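/* SR-IOV reset helper: re-run hw_init for SMC, PSP, DCE, GFX, SDMA, UVD and
 * VCE in a fixed order once the GART has been recovered */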
e4f0fdcc 1945static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1946{
1947 int i, r;
1948
2cb681b6
ML
1949 static enum amd_ip_block_type ip_order[] = {
1950 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1951 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1952 AMD_IP_BLOCK_TYPE_DCE,
1953 AMD_IP_BLOCK_TYPE_GFX,
1954 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1955 AMD_IP_BLOCK_TYPE_UVD,
1956 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1957 };
a90ad3c2 1958
2cb681b6
ML
1959 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1960 int j;
1961 struct amdgpu_ip_block *block;
a90ad3c2 1962
2cb681b6
ML
1963 for (j = 0; j < adev->num_ip_blocks; j++) {
1964 block = &adev->ip_blocks[j];
1965
1966 if (block->version->type != ip_order[i] ||
1967 !block->status.valid)
1968 continue;
1969
1970 r = block->version->funcs->hw_init(adev);
 1971 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1972 }
1973 }
1974
1975 return 0;
1976}
1977
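/* phase 1 of resume: bring back only the COMMON, GMC and IH blocks so that
 * memory access and interrupts work before the rest are resumed */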
fcf0649f 1978static int amdgpu_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
1979{
1980 int i, r;
1981
a90ad3c2
ML
1982 for (i = 0; i < adev->num_ip_blocks; i++) {
1983 if (!adev->ip_blocks[i].status.valid)
1984 continue;
a90ad3c2
ML
1985 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1986 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
1987 adev->ip_blocks[i].version->type ==
1988 AMD_IP_BLOCK_TYPE_IH) {
1989 r = adev->ip_blocks[i].version->funcs->resume(adev);
1990 if (r) {
1991 DRM_ERROR("resume of IP block <%s> failed %d\n",
1992 adev->ip_blocks[i].version->funcs->name, r);
1993 return r;
1994 }
a90ad3c2
ML
1995 }
1996 }
1997
1998 return 0;
1999}
2000
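/* phase 2 of resume: bring back every remaining valid IP block, skipping the
 * COMMON, GMC and IH blocks already handled in phase 1 */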
fcf0649f 2001static int amdgpu_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2002{
2003 int i, r;
2004
2005 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2006 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2007 continue;
fcf0649f
CZ
2008 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2009 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2010 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
2011 continue;
a1255107 2012 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2013 if (r) {
a1255107
AD
2014 DRM_ERROR("resume of IP block <%s> failed %d\n",
2015 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2016 return r;
2c1a2784 2017 }
d38ceaf9
AD
2018 }
2019
2020 return 0;
2021}
2022
fcf0649f
CZ
2023static int amdgpu_resume(struct amdgpu_device *adev)
2024{
2025 int r;
2026
2027 r = amdgpu_resume_phase1(adev);
2028 if (r)
2029 return r;
2030 r = amdgpu_resume_phase2(adev);
2031
2032 return r;
2033}
2034
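/* check the vBIOS for SR-IOV support and record it in adev->virt.caps;
 * report an error to the host if a VF is running without an SR-IOV vBIOS */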
4e99a44e 2035static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2036{
6867e1b5
ML
2037 if (amdgpu_sriov_vf(adev)) {
2038 if (adev->is_atom_fw) {
2039 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2040 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2041 } else {
2042 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2043 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2044 }
2045
2046 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2047 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2048 }
048765ad
AR
2049}
2050
4562236b
HW
2051bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2052{
2053 switch (asic_type) {
2054#if defined(CONFIG_DRM_AMD_DC)
2055 case CHIP_BONAIRE:
0d6fbccb 2056 case CHIP_KAVERI:
291d5c21
AD
2057 case CHIP_KABINI:
2058 case CHIP_MULLINS:
5727b509
HW
2059 /*
2060 * We have systems in the wild with these ASICs that require
2061 * LVDS and VGA support which is not supported with DC.
2062 *
2063 * Fallback to the non-DC driver here by default so as not to
2064 * cause regressions.
2065 */
2066 return amdgpu_dc > 0;
2067 case CHIP_HAWAII:
4562236b
HW
2068 case CHIP_CARRIZO:
2069 case CHIP_STONEY:
2070 case CHIP_POLARIS11:
2071 case CHIP_POLARIS10:
2c8ad2d5 2072 case CHIP_POLARIS12:
4562236b
HW
2073 case CHIP_TONGA:
2074 case CHIP_FIJI:
2075#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2076 return amdgpu_dc != 0;
4562236b 2077#endif
42f8ffa1
HW
2078 case CHIP_VEGA10:
2079#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2080 case CHIP_RAVEN:
42f8ffa1 2081#endif
fd187853 2082 return amdgpu_dc != 0;
4562236b
HW
2083#endif
2084 default:
2085 return false;
2086 }
2087}
2088
2089/**
2090 * amdgpu_device_has_dc_support - check if dc is supported
2091 *
 2092 * @adev: amdgpu_device pointer
2093 *
2094 * Returns true for supported, false for not supported
2095 */
2096bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2097{
2555039d
XY
2098 if (amdgpu_sriov_vf(adev))
2099 return false;
2100
4562236b
HW
2101 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2102}
2103
d38ceaf9
AD
2104/**
2105 * amdgpu_device_init - initialize the driver
2106 *
2107 * @adev: amdgpu_device pointer
 2108 * @ddev: drm dev pointer
2109 * @pdev: pci dev pointer
2110 * @flags: driver flags
2111 *
2112 * Initializes the driver info and hw (all asics).
2113 * Returns 0 for success or an error on failure.
2114 * Called at driver startup.
2115 */
2116int amdgpu_device_init(struct amdgpu_device *adev,
2117 struct drm_device *ddev,
2118 struct pci_dev *pdev,
2119 uint32_t flags)
2120{
2121 int r, i;
2122 bool runtime = false;
95844d20 2123 u32 max_MBps;
d38ceaf9
AD
2124
2125 adev->shutdown = false;
2126 adev->dev = &pdev->dev;
2127 adev->ddev = ddev;
2128 adev->pdev = pdev;
2129 adev->flags = flags;
2f7d10b3 2130 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2131 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
6f02a696 2132 adev->mc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2133 adev->accel_working = false;
2134 adev->num_rings = 0;
2135 adev->mman.buffer_funcs = NULL;
2136 adev->mman.buffer_funcs_ring = NULL;
2137 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2138 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 2139 adev->gart.gart_funcs = NULL;
f54d1867 2140 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2141 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2142
2143 adev->smc_rreg = &amdgpu_invalid_rreg;
2144 adev->smc_wreg = &amdgpu_invalid_wreg;
2145 adev->pcie_rreg = &amdgpu_invalid_rreg;
2146 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2147 adev->pciep_rreg = &amdgpu_invalid_rreg;
2148 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2149 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2150 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2151 adev->didt_rreg = &amdgpu_invalid_rreg;
2152 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2153 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2154 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2155 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2156 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2157
3e39ab90
AD
2158 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2159 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2160 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2161
 2162 /* mutex initializations are all done here so we
 2163 * can call functions without locking issues */
d38ceaf9 2164 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2165 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2166 mutex_init(&adev->pm.mutex);
2167 mutex_init(&adev->gfx.gpu_clock_mutex);
2168 mutex_init(&adev->srbm_mutex);
b8866c26 2169 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2170 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2171 mutex_init(&adev->mn_lock);
e23b74aa 2172 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9
AD
2173 hash_init(adev->mn_hash);
2174
2175 amdgpu_check_arguments(adev);
2176
d38ceaf9
AD
2177 spin_lock_init(&adev->mmio_idx_lock);
2178 spin_lock_init(&adev->smc_idx_lock);
2179 spin_lock_init(&adev->pcie_idx_lock);
2180 spin_lock_init(&adev->uvd_ctx_idx_lock);
2181 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2182 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2183 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2184 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2185 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2186
0c4e7fa5
CZ
2187 INIT_LIST_HEAD(&adev->shadow_list);
2188 mutex_init(&adev->shadow_list_lock);
2189
5c1354bd
CZ
2190 INIT_LIST_HEAD(&adev->gtt_list);
2191 spin_lock_init(&adev->gtt_list_lock);
2192
795f2813
AR
2193 INIT_LIST_HEAD(&adev->ring_lru_list);
2194 spin_lock_init(&adev->ring_lru_list_lock);
2195
2dc80b00
S
2196 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2197
0fa49558
AX
2198 /* Registers mapping */
2199 /* TODO: block userspace mapping of io register */
da69c161
KW
2200 if (adev->asic_type >= CHIP_BONAIRE) {
2201 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2202 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2203 } else {
2204 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2205 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2206 }
d38ceaf9 2207
d38ceaf9
AD
2208 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2209 if (adev->rmmio == NULL) {
2210 return -ENOMEM;
2211 }
2212 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2213 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2214
705e519e
CK
2215 /* doorbell bar mapping */
2216 amdgpu_doorbell_init(adev);
d38ceaf9
AD
2217
2218 /* io port mapping */
2219 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2220 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2221 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2222 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2223 break;
2224 }
2225 }
2226 if (adev->rio_mem == NULL)
b64a18c5 2227 DRM_INFO("PCI I/O BAR not found.\n");
d38ceaf9
AD
2228
2229 /* early init functions */
2230 r = amdgpu_early_init(adev);
2231 if (r)
2232 return r;
2233
 2234 /* if we have more than one VGA card, then disable the amdgpu VGA resources */
2235 /* this will fail for cards that aren't VGA class devices, just
2236 * ignore it */
2237 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2238
e9bef455 2239 if (amdgpu_device_is_px(ddev))
d38ceaf9 2240 runtime = true;
84c8b22e
LW
2241 if (!pci_is_thunderbolt_attached(adev->pdev))
2242 vga_switcheroo_register_client(adev->pdev,
2243 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2244 if (runtime)
2245 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2246
2247 /* Read BIOS */
83ba126a
AD
2248 if (!amdgpu_get_bios(adev)) {
2249 r = -EINVAL;
2250 goto failed;
2251 }
f7e9e9fe 2252
d38ceaf9 2253 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2254 if (r) {
2255 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2256 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2257 goto failed;
2c1a2784 2258 }
d38ceaf9 2259
4e99a44e
ML
 2260 /* detect if we are running with an SR-IOV vBIOS */
2261 amdgpu_device_detect_sriov_bios(adev);
048765ad 2262
d38ceaf9 2263 /* Post card if necessary */
91fe77eb 2264 if (amdgpu_need_post(adev)) {
d38ceaf9 2265 if (!adev->bios) {
bec86378 2266 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2267 r = -EINVAL;
2268 goto failed;
d38ceaf9 2269 }
bec86378 2270 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2271 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2272 if (r) {
2273 dev_err(adev->dev, "gpu post error!\n");
2274 goto failed;
2275 }
2276 } else {
2277 DRM_INFO("GPU post is not needed\n");
d38ceaf9
AD
2278 }
2279
88b64e95
AD
2280 if (adev->is_atom_fw) {
2281 /* Initialize clocks */
2282 r = amdgpu_atomfirmware_get_clock_info(adev);
2283 if (r) {
2284 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2285 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2286 goto failed;
2287 }
2288 } else {
a5bde2f9
AD
2289 /* Initialize clocks */
2290 r = amdgpu_atombios_get_clock_info(adev);
2291 if (r) {
2292 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2293 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2294 goto failed;
a5bde2f9
AD
2295 }
2296 /* init i2c buses */
4562236b
HW
2297 if (!amdgpu_device_has_dc_support(adev))
2298 amdgpu_atombios_i2c_init(adev);
2c1a2784 2299 }
d38ceaf9
AD
2300
2301 /* Fence driver */
2302 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2303 if (r) {
2304 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2305 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2306 goto failed;
2c1a2784 2307 }
d38ceaf9
AD
2308
2309 /* init the mode config */
2310 drm_mode_config_init(adev->ddev);
2311
2312 r = amdgpu_init(adev);
2313 if (r) {
2c1a2784 2314 dev_err(adev->dev, "amdgpu_init failed\n");
e23b74aa 2315 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
d38ceaf9 2316 amdgpu_fini(adev);
83ba126a 2317 goto failed;
d38ceaf9
AD
2318 }
2319
2320 adev->accel_working = true;
2321
e59c0205
AX
2322 amdgpu_vm_check_compute_bug(adev);
2323
95844d20
MO
2324 /* Initialize the buffer migration limit. */
2325 if (amdgpu_moverate >= 0)
2326 max_MBps = amdgpu_moverate;
2327 else
2328 max_MBps = 8; /* Allow 8 MB/s. */
2329 /* Get a log2 for easy divisions. */
2330 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2331
d38ceaf9
AD
2332 r = amdgpu_ib_pool_init(adev);
2333 if (r) {
2334 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2335 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2336 goto failed;
d38ceaf9
AD
2337 }
2338
2339 r = amdgpu_ib_ring_tests(adev);
2340 if (r)
2341 DRM_ERROR("ib ring test failed (%d).\n", r);
2342
2dc8f81e
HC
2343 if (amdgpu_sriov_vf(adev))
2344 amdgpu_virt_init_data_exchange(adev);
2345
9bc92b9c
ML
2346 amdgpu_fbdev_init(adev);
2347
d2f52ac8
RZ
2348 r = amdgpu_pm_sysfs_init(adev);
2349 if (r)
2350 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2351
d38ceaf9 2352 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2353 if (r)
d38ceaf9 2354 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2355
2356 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2357 if (r)
d38ceaf9 2358 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2359
4f0955fc
HR
2360 r = amdgpu_debugfs_test_ib_ring_init(adev);
2361 if (r)
 2362 DRM_ERROR("registering ib ring test debugfs failed (%d).\n", r);
2363
50ab2533 2364 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2365 if (r)
50ab2533 2366 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2367
db95e218
KR
2368 r = amdgpu_debugfs_vbios_dump_init(adev);
2369 if (r)
2370 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2371
d38ceaf9
AD
2372 if ((amdgpu_testing & 1)) {
2373 if (adev->accel_working)
2374 amdgpu_test_moves(adev);
2375 else
2376 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2377 }
d38ceaf9
AD
2378 if (amdgpu_benchmarking) {
2379 if (adev->accel_working)
2380 amdgpu_benchmark(adev, amdgpu_benchmarking);
2381 else
2382 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2383 }
2384
2385 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2386 * explicit gating rather than handling it automatically.
2387 */
2388 r = amdgpu_late_init(adev);
2c1a2784
AD
2389 if (r) {
2390 dev_err(adev->dev, "amdgpu_late_init failed\n");
e23b74aa 2391 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2392 goto failed;
2c1a2784 2393 }
d38ceaf9
AD
2394
2395 return 0;
83ba126a
AD
2396
2397failed:
89041940 2398 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2399 if (runtime)
2400 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2401 return r;
d38ceaf9
AD
2402}
2403
d38ceaf9
AD
2404/**
2405 * amdgpu_device_fini - tear down the driver
2406 *
2407 * @adev: amdgpu_device pointer
2408 *
2409 * Tear down the driver info (all asics).
2410 * Called at driver shutdown.
2411 */
2412void amdgpu_device_fini(struct amdgpu_device *adev)
2413{
2414 int r;
2415
2416 DRM_INFO("amdgpu: finishing device.\n");
2417 adev->shutdown = true;
db2c2a97
PD
2418 if (adev->mode_info.mode_config_initialized)
2419 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2420 /* evict vram memory */
2421 amdgpu_bo_evict_vram(adev);
2422 amdgpu_ib_pool_fini(adev);
a05502e5 2423 amdgpu_fw_reserve_vram_fini(adev);
d38ceaf9
AD
2424 amdgpu_fence_driver_fini(adev);
2425 amdgpu_fbdev_fini(adev);
2426 r = amdgpu_fini(adev);
ab4fe3e1
HR
2427 if (adev->firmware.gpu_info_fw) {
2428 release_firmware(adev->firmware.gpu_info_fw);
2429 adev->firmware.gpu_info_fw = NULL;
2430 }
d38ceaf9 2431 adev->accel_working = false;
2dc80b00 2432 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2433 /* free i2c buses */
4562236b
HW
2434 if (!amdgpu_device_has_dc_support(adev))
2435 amdgpu_i2c_fini(adev);
d38ceaf9
AD
2436 amdgpu_atombios_fini(adev);
2437 kfree(adev->bios);
2438 adev->bios = NULL;
84c8b22e
LW
2439 if (!pci_is_thunderbolt_attached(adev->pdev))
2440 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2441 if (adev->flags & AMD_IS_PX)
2442 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2443 vga_client_register(adev->pdev, NULL, NULL, NULL);
2444 if (adev->rio_mem)
2445 pci_iounmap(adev->pdev, adev->rio_mem);
2446 adev->rio_mem = NULL;
2447 iounmap(adev->rmmio);
2448 adev->rmmio = NULL;
705e519e 2449 amdgpu_doorbell_fini(adev);
d2f52ac8 2450 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2451 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2452}
2453
2454
2455/*
2456 * Suspend & resume.
2457 */
2458/**
810ddc3a 2459 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2460 *
 2461 * @dev: drm dev pointer
 2462 * @suspend: whether to power down the pci device
2463 *
2464 * Puts the hw in the suspend state (all asics).
2465 * Returns 0 for success or an error on failure.
2466 * Called at driver suspend.
2467 */
810ddc3a 2468int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2469{
2470 struct amdgpu_device *adev;
2471 struct drm_crtc *crtc;
2472 struct drm_connector *connector;
5ceb54c6 2473 int r;
d38ceaf9
AD
2474
2475 if (dev == NULL || dev->dev_private == NULL) {
2476 return -ENODEV;
2477 }
2478
2479 adev = dev->dev_private;
2480
2481 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2482 return 0;
2483
2484 drm_kms_helper_poll_disable(dev);
2485
4562236b
HW
2486 if (!amdgpu_device_has_dc_support(adev)) {
2487 /* turn off display hw */
2488 drm_modeset_lock_all(dev);
2489 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2490 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2491 }
2492 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2493 }
2494
ba997709
YZ
2495 amdgpu_amdkfd_suspend(adev);
2496
756e6880 2497 /* unpin the front buffers and cursors */
d38ceaf9 2498 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2499 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2500 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2501 struct amdgpu_bo *robj;
2502
756e6880
AD
2503 if (amdgpu_crtc->cursor_bo) {
2504 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2505 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2506 if (r == 0) {
2507 amdgpu_bo_unpin(aobj);
2508 amdgpu_bo_unreserve(aobj);
2509 }
2510 }
2511
d38ceaf9
AD
2512 if (rfb == NULL || rfb->obj == NULL) {
2513 continue;
2514 }
2515 robj = gem_to_amdgpu_bo(rfb->obj);
2516 /* don't unpin kernel fb objects */
2517 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2518 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2519 if (r == 0) {
2520 amdgpu_bo_unpin(robj);
2521 amdgpu_bo_unreserve(robj);
2522 }
2523 }
2524 }
2525 /* evict vram memory */
2526 amdgpu_bo_evict_vram(adev);
2527
5ceb54c6 2528 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2529
2530 r = amdgpu_suspend(adev);
2531
a0a71e49
AD
2532 /* evict remaining vram memory
2533 * This second call to evict vram is to evict the gart page table
2534 * using the CPU.
2535 */
d38ceaf9
AD
2536 amdgpu_bo_evict_vram(adev);
2537
d05da0e2 2538 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2539 pci_save_state(dev->pdev);
2540 if (suspend) {
2541 /* Shut down the device */
2542 pci_disable_device(dev->pdev);
2543 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2544 } else {
2545 r = amdgpu_asic_reset(adev);
2546 if (r)
2547 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2548 }
2549
2550 if (fbcon) {
2551 console_lock();
2552 amdgpu_fbdev_set_suspend(adev, 1);
2553 console_unlock();
2554 }
2555 return 0;
2556}
2557
2558/**
810ddc3a 2559 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2560 *
 2561 * @dev: drm dev pointer
2562 *
2563 * Bring the hw back to operating state (all asics).
2564 * Returns 0 for success or an error on failure.
2565 * Called at driver resume.
2566 */
810ddc3a 2567int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2568{
2569 struct drm_connector *connector;
2570 struct amdgpu_device *adev = dev->dev_private;
756e6880 2571 struct drm_crtc *crtc;
03161a6e 2572 int r = 0;
d38ceaf9
AD
2573
2574 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2575 return 0;
2576
74b0b157 2577 if (fbcon)
d38ceaf9 2578 console_lock();
74b0b157 2579
d38ceaf9
AD
2580 if (resume) {
2581 pci_set_power_state(dev->pdev, PCI_D0);
2582 pci_restore_state(dev->pdev);
74b0b157 2583 r = pci_enable_device(dev->pdev);
03161a6e
HR
2584 if (r)
2585 goto unlock;
d38ceaf9 2586 }
d05da0e2 2587 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2588
2589 /* post card */
c836fec5 2590 if (amdgpu_need_post(adev)) {
74b0b157 2591 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2592 if (r)
2593 DRM_ERROR("amdgpu asic init failed\n");
2594 }
d38ceaf9
AD
2595
2596 r = amdgpu_resume(adev);
e6707218 2597 if (r) {
ca198528 2598 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2599 goto unlock;
e6707218 2600 }
5ceb54c6
AD
2601 amdgpu_fence_driver_resume(adev);
2602
ca198528
FC
2603 if (resume) {
2604 r = amdgpu_ib_ring_tests(adev);
2605 if (r)
2606 DRM_ERROR("ib ring test failed (%d).\n", r);
2607 }
d38ceaf9
AD
2608
2609 r = amdgpu_late_init(adev);
03161a6e
HR
2610 if (r)
2611 goto unlock;
d38ceaf9 2612
756e6880
AD
2613 /* pin cursors */
2614 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2615 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2616
2617 if (amdgpu_crtc->cursor_bo) {
2618 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2619 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2620 if (r == 0) {
2621 r = amdgpu_bo_pin(aobj,
2622 AMDGPU_GEM_DOMAIN_VRAM,
2623 &amdgpu_crtc->cursor_addr);
2624 if (r != 0)
2625 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2626 amdgpu_bo_unreserve(aobj);
2627 }
2628 }
2629 }
ba997709
YZ
2630 r = amdgpu_amdkfd_resume(adev);
2631 if (r)
2632 return r;
756e6880 2633
d38ceaf9
AD
2634 /* blat the mode back in */
2635 if (fbcon) {
4562236b
HW
2636 if (!amdgpu_device_has_dc_support(adev)) {
2637 /* pre DCE11 */
2638 drm_helper_resume_force_mode(dev);
2639
2640 /* turn on display hw */
2641 drm_modeset_lock_all(dev);
2642 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2643 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2644 }
2645 drm_modeset_unlock_all(dev);
2646 } else {
2647 /*
2648 * There is no equivalent atomic helper to turn on
2649 * display, so we defined our own function for this,
2650 * once suspend resume is supported by the atomic
2651 * framework this will be reworked
2652 */
2653 amdgpu_dm_display_resume(adev);
d38ceaf9
AD
2654 }
2655 }
2656
2657 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2658
2659 /*
2660 * Most of the connector probing functions try to acquire runtime pm
2661 * refs to ensure that the GPU is powered on when connector polling is
2662 * performed. Since we're calling this from a runtime PM callback,
2663 * trying to acquire rpm refs will cause us to deadlock.
2664 *
2665 * Since we're guaranteed to be holding the rpm lock, it's safe to
2666 * temporarily disable the rpm helpers so this doesn't deadlock us.
2667 */
2668#ifdef CONFIG_PM
2669 dev->dev->power.disable_depth++;
2670#endif
4562236b
HW
2671 if (!amdgpu_device_has_dc_support(adev))
2672 drm_helper_hpd_irq_event(dev);
2673 else
2674 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2675#ifdef CONFIG_PM
2676 dev->dev->power.disable_depth--;
2677#endif
d38ceaf9 2678
03161a6e 2679 if (fbcon)
d38ceaf9 2680 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2681
2682unlock:
2683 if (fbcon)
d38ceaf9 2684 console_unlock();
d38ceaf9 2685
03161a6e 2686 return r;
d38ceaf9
AD
2687}
2688
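/* poll each IP block's check_soft_reset callback and flag hung blocks;
 * returns true if any block is hung (always true under SR-IOV) */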
63fbf42f
CZ
2689static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2690{
2691 int i;
2692 bool asic_hang = false;
2693
f993d628
ML
2694 if (amdgpu_sriov_vf(adev))
2695 return true;
2696
63fbf42f 2697 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2698 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2699 continue;
a1255107
AD
2700 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2701 adev->ip_blocks[i].status.hang =
2702 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2703 if (adev->ip_blocks[i].status.hang) {
2704 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2705 asic_hang = true;
2706 }
2707 }
2708 return asic_hang;
2709}
2710
4d446656 2711static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2712{
2713 int i, r = 0;
2714
2715 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2716 if (!adev->ip_blocks[i].status.valid)
d31a501e 2717 continue;
a1255107
AD
2718 if (adev->ip_blocks[i].status.hang &&
2719 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2720 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2721 if (r)
2722 return r;
2723 }
2724 }
2725
2726 return 0;
2727}
2728
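/* a hang in the GMC, SMC, ACP, DCE or PSP block cannot be cured by a soft
 * reset, so a full ASIC reset is required */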
35d782fe
CZ
2729static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2730{
da146d3b
AD
2731 int i;
2732
2733 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2734 if (!adev->ip_blocks[i].status.valid)
da146d3b 2735 continue;
a1255107
AD
2736 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2737 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2738 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2739 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2740 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2741 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
 2742 DRM_INFO("Some blocks need a full reset!\n");
2743 return true;
2744 }
2745 }
35d782fe
CZ
2746 }
2747 return false;
2748}
2749
2750static int amdgpu_soft_reset(struct amdgpu_device *adev)
2751{
2752 int i, r = 0;
2753
2754 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2755 if (!adev->ip_blocks[i].status.valid)
35d782fe 2756 continue;
a1255107
AD
2757 if (adev->ip_blocks[i].status.hang &&
2758 adev->ip_blocks[i].version->funcs->soft_reset) {
2759 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2760 if (r)
2761 return r;
2762 }
2763 }
2764
2765 return 0;
2766}
2767
2768static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2769{
2770 int i, r = 0;
2771
2772 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2773 if (!adev->ip_blocks[i].status.valid)
35d782fe 2774 continue;
a1255107
AD
2775 if (adev->ip_blocks[i].status.hang &&
2776 adev->ip_blocks[i].version->funcs->post_soft_reset)
2777 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2778 if (r)
2779 return r;
2780 }
2781
2782 return 0;
2783}
2784
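/* shadow backup of VRAM buffers is only worthwhile on dGPUs and only when a
 * lockup timeout is configured */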
3ad81f16
CZ
2785bool amdgpu_need_backup(struct amdgpu_device *adev)
2786{
2787 if (adev->flags & AMD_IS_APU)
2788 return false;
2789
 2790 return amdgpu_lockup_timeout > 0;
2791}
2792
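/* restore a single VRAM BO from its shadow copy after a reset; @fence is set
 * to the copy fence on @ring so the caller can wait for completion */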
53cdccd5
CZ
2793static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2794 struct amdgpu_ring *ring,
2795 struct amdgpu_bo *bo,
f54d1867 2796 struct dma_fence **fence)
53cdccd5
CZ
2797{
2798 uint32_t domain;
2799 int r;
2800
23d2e504
RH
2801 if (!bo->shadow)
2802 return 0;
2803
1d284797 2804 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2805 if (r)
2806 return r;
2807 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2808 /* if bo has been evicted, then no need to recover */
2809 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2810 r = amdgpu_bo_validate(bo->shadow);
2811 if (r) {
2812 DRM_ERROR("bo validate failed!\n");
2813 goto err;
2814 }
2815
23d2e504 2816 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2817 NULL, fence, true);
23d2e504
RH
2818 if (r) {
2819 DRM_ERROR("recover page table failed!\n");
2820 goto err;
2821 }
2822 }
53cdccd5 2823err:
23d2e504
RH
2824 amdgpu_bo_unreserve(bo);
2825 return r;
53cdccd5
CZ
2826}
2827
a90ad3c2
ML
2828/**
2829 * amdgpu_sriov_gpu_reset - reset the asic
2830 *
2831 * @adev: amdgpu device pointer
7225f873 2832 * @job: the job that triggered the hang
a90ad3c2
ML
2833 *
 2834 * Attempt to reset the GPU if it has hung (all asics),
 2835 * for the SR-IOV case.
2836 * Returns 0 for success or an error on failure.
2837 */
7225f873 2838int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
a90ad3c2 2839{
65781c78 2840 int i, j, r = 0;
a90ad3c2
ML
2841 int resched;
2842 struct amdgpu_bo *bo, *tmp;
2843 struct amdgpu_ring *ring;
2844 struct dma_fence *fence = NULL, *next = NULL;
2845
147b5983 2846 mutex_lock(&adev->virt.lock_reset);
a90ad3c2 2847 atomic_inc(&adev->gpu_reset_counter);
3224a12b 2848 adev->in_sriov_reset = true;
a90ad3c2
ML
2849
2850 /* block TTM */
2851 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2852
65781c78
ML
 2853 /* we start from the ring that triggered the GPU hang */
2854 j = job ? job->ring->idx : 0;
a90ad3c2 2855
65781c78
ML
2856 /* block scheduler */
2857 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2858 ring = adev->rings[i % AMDGPU_MAX_RINGS];
a90ad3c2
ML
2859 if (!ring || !ring->sched.thread)
2860 continue;
2861
2862 kthread_park(ring->sched.thread);
65781c78
ML
2863
2864 if (job && j != i)
2865 continue;
2866
4f059ecd 2867 /* last chance to check if the job was removed from the mirror list,
65781c78 2868 * since we have already paid the cost of kthread_park */
4f059ecd 2869 if (job && list_empty(&job->base.node)) {
65781c78
ML
2870 kthread_unpark(ring->sched.thread);
2871 goto give_up_reset;
2872 }
2873
2874 if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2875 amd_sched_job_kickout(&job->base);
2876
2877 /* only do job_reset on the hang ring if @job not NULL */
a90ad3c2 2878 amd_sched_hw_job_reset(&ring->sched);
a90ad3c2 2879
65781c78
ML
2880 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2881 amdgpu_fence_driver_force_completion_ring(ring);
2882 }
a90ad3c2
ML
2883
2884 /* request to take full control of GPU before re-initialization */
7225f873 2885 if (job)
a90ad3c2
ML
2886 amdgpu_virt_reset_gpu(adev);
2887 else
2888 amdgpu_virt_request_full_gpu(adev, true);
2889
2890
2891 /* Resume IP prior to SMC */
e4f0fdcc 2892 amdgpu_sriov_reinit_early(adev);
a90ad3c2
ML
2893
 2894 /* we need to recover the gart prior to running SMC/CP/SDMA resume */
2895 amdgpu_ttm_recover_gart(adev);
2896
2897 /* now we are okay to resume SMC/CP/SDMA */
e4f0fdcc 2898 amdgpu_sriov_reinit_late(adev);
a90ad3c2
ML
2899
2900 amdgpu_irq_gpu_reset_resume_helper(adev);
2901
 2902 r = amdgpu_ib_ring_tests(adev);
 2903 if (r)
 2904 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2904
2905 /* release full control of GPU after ib test */
2906 amdgpu_virt_release_full_gpu(adev, true);
2907
2908 DRM_INFO("recover vram bo from shadow\n");
2909
2910 ring = adev->mman.buffer_funcs_ring;
2911 mutex_lock(&adev->shadow_list_lock);
2912 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 2913 next = NULL;
a90ad3c2
ML
2914 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2915 if (fence) {
2916 r = dma_fence_wait(fence, false);
2917 if (r) {
2918 WARN(r, "recovery from shadow isn't completed\n");
2919 break;
2920 }
2921 }
2922
2923 dma_fence_put(fence);
2924 fence = next;
2925 }
2926 mutex_unlock(&adev->shadow_list_lock);
2927
2928 if (fence) {
2929 r = dma_fence_wait(fence, false);
2930 if (r)
2931 WARN(r, "recovery from shadow isn't completed\n");
2932 }
2933 dma_fence_put(fence);
2934
65781c78
ML
2935 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2936 ring = adev->rings[i % AMDGPU_MAX_RINGS];
a90ad3c2
ML
2937 if (!ring || !ring->sched.thread)
2938 continue;
2939
65781c78
ML
2940 if (job && j != i) {
2941 kthread_unpark(ring->sched.thread);
2942 continue;
2943 }
2944
a90ad3c2
ML
2945 amd_sched_job_recovery(&ring->sched);
2946 kthread_unpark(ring->sched.thread);
2947 }
2948
2949 drm_helper_resume_force_mode(adev->ddev);
65781c78 2950give_up_reset:
a90ad3c2
ML
2951 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2952 if (r) {
 2953 /* bad news, how to tell it to userspace? */
2954 dev_info(adev->dev, "GPU reset failed\n");
65781c78
ML
2955 } else {
 2956 dev_info(adev->dev, "GPU reset succeeded!\n");
a90ad3c2
ML
2957 }
2958
3224a12b 2959 adev->in_sriov_reset = false;
147b5983 2960 mutex_unlock(&adev->virt.lock_reset);
a90ad3c2
ML
2961 return r;
2962}
2963
d38ceaf9
AD
2964/**
2965 * amdgpu_gpu_reset - reset the asic
2966 *
2967 * @adev: amdgpu device pointer
2968 *
 2969 * Attempt to reset the GPU if it has hung (all asics).
2970 * Returns 0 for success or an error on failure.
2971 */
2972int amdgpu_gpu_reset(struct amdgpu_device *adev)
2973{
4562236b 2974 struct drm_atomic_state *state = NULL;
d38ceaf9
AD
2975 int i, r;
2976 int resched;
0c49e0b8 2977 bool need_full_reset, vram_lost = false;
fb140b29 2978
63fbf42f
CZ
2979 if (!amdgpu_check_soft_reset(adev)) {
2980 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2981 return 0;
2982 }
d38ceaf9 2983
d94aed5a 2984 atomic_inc(&adev->gpu_reset_counter);
d38ceaf9 2985
a3c47d6b
CZ
2986 /* block TTM */
2987 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
2988 /* store modesetting */
2989 if (amdgpu_device_has_dc_support(adev))
2990 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 2991
0875dc9e
CZ
2992 /* block scheduler */
2993 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2994 struct amdgpu_ring *ring = adev->rings[i];
2995
51687759 2996 if (!ring || !ring->sched.thread)
0875dc9e
CZ
2997 continue;
2998 kthread_park(ring->sched.thread);
aa1c8900 2999 amd_sched_hw_job_reset(&ring->sched);
0875dc9e 3000 }
2200edac
CZ
3001 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3002 amdgpu_fence_driver_force_completion(adev);
d38ceaf9 3003
35d782fe 3004 need_full_reset = amdgpu_need_full_reset(adev);
d38ceaf9 3005
35d782fe
CZ
3006 if (!need_full_reset) {
3007 amdgpu_pre_soft_reset(adev);
3008 r = amdgpu_soft_reset(adev);
3009 amdgpu_post_soft_reset(adev);
3010 if (r || amdgpu_check_soft_reset(adev)) {
3011 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3012 need_full_reset = true;
3013 }
f1aa7e08
CZ
3014 }
3015
35d782fe 3016 if (need_full_reset) {
35d782fe 3017 r = amdgpu_suspend(adev);
bfa99269 3018
35d782fe 3019retry:
d05da0e2 3020 amdgpu_atombios_scratch_regs_save(adev);
35d782fe 3021 r = amdgpu_asic_reset(adev);
d05da0e2 3022 amdgpu_atombios_scratch_regs_restore(adev);
35d782fe
CZ
3023 /* post card */
3024 amdgpu_atom_asic_init(adev->mode_info.atom_context);
3025
3026 if (!r) {
3027 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
fcf0649f
CZ
3028 r = amdgpu_resume_phase1(adev);
3029 if (r)
3030 goto out;
0c49e0b8 3031 vram_lost = amdgpu_check_vram_lost(adev);
f1892138 3032 if (vram_lost) {
0c49e0b8 3033 DRM_ERROR("VRAM is lost!\n");
f1892138
CZ
3034 atomic_inc(&adev->vram_lost_counter);
3035 }
fcf0649f
CZ
3036 r = amdgpu_ttm_recover_gart(adev);
3037 if (r)
3038 goto out;
3039 r = amdgpu_resume_phase2(adev);
3040 if (r)
3041 goto out;
0c49e0b8
CZ
3042 if (vram_lost)
3043 amdgpu_fill_reset_magic(adev);
35d782fe 3044 }
d38ceaf9 3045 }
fcf0649f 3046out:
d38ceaf9 3047 if (!r) {
e72cfd58 3048 amdgpu_irq_gpu_reset_resume_helper(adev);
1f465087
CZ
3049 r = amdgpu_ib_ring_tests(adev);
3050 if (r) {
3051 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
40019dc4 3052 r = amdgpu_suspend(adev);
53cdccd5 3053 need_full_reset = true;
40019dc4 3054 goto retry;
1f465087 3055 }
53cdccd5
CZ
3056 /**
 3057 * recover vm page tables, since we cannot depend on VRAM being
 3058 * consistent after a full gpu reset.
3059 */
3060 if (need_full_reset && amdgpu_need_backup(adev)) {
3061 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3062 struct amdgpu_bo *bo, *tmp;
f54d1867 3063 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
3064
3065 DRM_INFO("recover vram bo from shadow\n");
3066 mutex_lock(&adev->shadow_list_lock);
3067 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 3068 next = NULL;
53cdccd5
CZ
3069 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3070 if (fence) {
f54d1867 3071 r = dma_fence_wait(fence, false);
53cdccd5 3072 if (r) {
1d7b17b0 3073 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
3074 break;
3075 }
3076 }
1f465087 3077
f54d1867 3078 dma_fence_put(fence);
53cdccd5
CZ
3079 fence = next;
3080 }
3081 mutex_unlock(&adev->shadow_list_lock);
3082 if (fence) {
f54d1867 3083 r = dma_fence_wait(fence, false);
53cdccd5 3084 if (r)
1d7b17b0 3085 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 3086 }
f54d1867 3087 dma_fence_put(fence);
53cdccd5 3088 }
d38ceaf9
AD
3089 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3090 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
3091
3092 if (!ring || !ring->sched.thread)
d38ceaf9 3093 continue;
53cdccd5 3094
aa1c8900 3095 amd_sched_job_recovery(&ring->sched);
0875dc9e 3096 kthread_unpark(ring->sched.thread);
d38ceaf9 3097 }
d38ceaf9 3098 } else {
2200edac 3099 dev_err(adev->dev, "asic resume failed (%d).\n", r);
d38ceaf9 3100 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
51687759 3101 if (adev->rings[i] && adev->rings[i]->sched.thread) {
0875dc9e 3102 kthread_unpark(adev->rings[i]->sched.thread);
0875dc9e 3103 }
d38ceaf9
AD
3104 }
3105 }
3106
4562236b
HW
3107 if (amdgpu_device_has_dc_support(adev)) {
3108 r = drm_atomic_helper_resume(adev->ddev, state);
3109 amdgpu_dm_display_resume(adev);
3110 } else
3111 drm_helper_resume_force_mode(adev->ddev);
d38ceaf9
AD
3112
3113 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
89041940 3114 if (r) {
d38ceaf9
AD
 3115 /* bad news, how to tell it to userspace? */
3116 dev_info(adev->dev, "GPU reset failed\n");
89041940
GW
3117 }
3118 else {
6643be65 3119 dev_info(adev->dev, "GPU reset succeeded!\n");
89041940 3120 }
d38ceaf9 3121
89041940 3122 amdgpu_vf_error_trans_all(adev);
d38ceaf9
AD
3123 return r;
3124}
3125
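/* derive the supported PCIe link speed and width masks, honouring the
 * pcie_gen_cap/pcie_lane_cap module overrides and falling back to safe
 * defaults on root-bus (APU) devices or when the query fails */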
d0dd7f0c
AD
3126void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3127{
3128 u32 mask;
3129 int ret;
3130
cd474ba0
AD
3131 if (amdgpu_pcie_gen_cap)
3132 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3133
cd474ba0
AD
3134 if (amdgpu_pcie_lane_cap)
3135 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3136
cd474ba0
AD
3137 /* covers APUs as well */
3138 if (pci_is_root_bus(adev->pdev->bus)) {
3139 if (adev->pm.pcie_gen_mask == 0)
3140 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3141 if (adev->pm.pcie_mlw_mask == 0)
3142 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3143 return;
cd474ba0 3144 }
d0dd7f0c 3145
cd474ba0
AD
3146 if (adev->pm.pcie_gen_mask == 0) {
3147 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3148 if (!ret) {
3149 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3150 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3151 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3152
3153 if (mask & DRM_PCIE_SPEED_25)
3154 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3155 if (mask & DRM_PCIE_SPEED_50)
3156 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3157 if (mask & DRM_PCIE_SPEED_80)
3158 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3159 } else {
3160 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3161 }
3162 }
3163 if (adev->pm.pcie_mlw_mask == 0) {
3164 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3165 if (!ret) {
3166 switch (mask) {
3167 case 32:
3168 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3169 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3170 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3171 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3172 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3173 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3174 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3175 break;
3176 case 16:
3177 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3178 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3179 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3180 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3181 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3182 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3183 break;
3184 case 12:
3185 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3186 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3187 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3188 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3189 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3190 break;
3191 case 8:
3192 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3193 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3194 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3195 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3196 break;
3197 case 4:
3198 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3199 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3200 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3201 break;
3202 case 2:
3203 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3204 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3205 break;
3206 case 1:
3207 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3208 break;
3209 default:
3210 break;
3211 }
3212 } else {
3213 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3214 }
3215 }
3216}
d38ceaf9
AD
3217
3218/*
3219 * Debugfs
3220 */
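/* register a set of drm_info_list entries with debugfs, skipping sets that
 * are already registered and bounding the total at
 * AMDGPU_DEBUGFS_MAX_COMPONENTS */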
3221int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 3222 const struct drm_info_list *files,
d38ceaf9
AD
3223 unsigned nfiles)
3224{
3225 unsigned i;
3226
3227 for (i = 0; i < adev->debugfs_count; i++) {
3228 if (adev->debugfs[i].files == files) {
3229 /* Already registered */
3230 return 0;
3231 }
3232 }
3233
3234 i = adev->debugfs_count + 1;
3235 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3236 DRM_ERROR("Reached maximum number of debugfs components.\n");
3237 DRM_ERROR("Report so we increase "
3238 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3239 return -EINVAL;
3240 }
3241 adev->debugfs[adev->debugfs_count].files = files;
3242 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3243 adev->debugfs_count = i;
3244#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
3245 drm_debugfs_create_files(files, nfiles,
3246 adev->ddev->primary->debugfs_root,
3247 adev->ddev->primary);
3248#endif
3249 return 0;
3250}
3251
d38ceaf9
AD
3252#if defined(CONFIG_DEBUG_FS)
3253
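/* read MMIO registers through debugfs; the file offset encodes extra state:
 * bit 23 takes the PM mutex, bit 62 enables bank selection with SE in bits
 * 24-33, SH in bits 34-43 and instance in bits 44-53 (0x3FF = broadcast),
 * and the low 22 bits are the register byte offset */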
3254static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3255 size_t size, loff_t *pos)
3256{
45063097 3257 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3258 ssize_t result = 0;
3259 int r;
bd12267d 3260 bool pm_pg_lock, use_bank;
56628159 3261 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3262
3263 if (size & 0x3 || *pos & 0x3)
3264 return -EINVAL;
3265
bd12267d
TSD
3266 /* are we reading registers for which a PG lock is necessary? */
3267 pm_pg_lock = (*pos >> 23) & 1;
3268
56628159 3269 if (*pos & (1ULL << 62)) {
0b968650
TSD
3270 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3271 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3272 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
32977f93
TSD
3273
3274 if (se_bank == 0x3FF)
3275 se_bank = 0xFFFFFFFF;
3276 if (sh_bank == 0x3FF)
3277 sh_bank = 0xFFFFFFFF;
3278 if (instance_bank == 0x3FF)
3279 instance_bank = 0xFFFFFFFF;
56628159 3280 use_bank = 1;
56628159
TSD
3281 } else {
3282 use_bank = 0;
3283 }
3284
801a6aa9 3285 *pos &= (1UL << 22) - 1;
bd12267d 3286
56628159 3287 if (use_bank) {
32977f93
TSD
3288 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3289 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
3290 return -EINVAL;
3291 mutex_lock(&adev->grbm_idx_mutex);
3292 amdgpu_gfx_select_se_sh(adev, se_bank,
3293 sh_bank, instance_bank);
3294 }
3295
bd12267d
TSD
3296 if (pm_pg_lock)
3297 mutex_lock(&adev->pm.mutex);
3298
d38ceaf9
AD
3299 while (size) {
3300 uint32_t value;
3301
3302 if (*pos > adev->rmmio_size)
56628159 3303 goto end;
d38ceaf9
AD
3304
3305 value = RREG32(*pos >> 2);
3306 r = put_user(value, (uint32_t *)buf);
56628159
TSD
3307 if (r) {
3308 result = r;
3309 goto end;
3310 }
d38ceaf9
AD
3311
3312 result += 4;
3313 buf += 4;
3314 *pos += 4;
3315 size -= 4;
3316 }
3317
56628159
TSD
3318end:
3319 if (use_bank) {
3320 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3321 mutex_unlock(&adev->grbm_idx_mutex);
3322 }
3323
bd12267d
TSD
3324 if (pm_pg_lock)
3325 mutex_unlock(&adev->pm.mutex);
3326
d38ceaf9
AD
3327 return result;
3328}
3329
3330static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3331 size_t size, loff_t *pos)
3332{
45063097 3333 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3334 ssize_t result = 0;
3335 int r;
394fdde2
TSD
3336 bool pm_pg_lock, use_bank;
3337 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3338
3339 if (size & 0x3 || *pos & 0x3)
3340 return -EINVAL;
3341
394fdde2
TSD
 3342 /* are we writing registers for which a PG lock is necessary? */
3343 pm_pg_lock = (*pos >> 23) & 1;
3344
3345 if (*pos & (1ULL << 62)) {
0b968650
TSD
3346 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3347 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3348 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
394fdde2
TSD
3349
3350 if (se_bank == 0x3FF)
3351 se_bank = 0xFFFFFFFF;
3352 if (sh_bank == 0x3FF)
3353 sh_bank = 0xFFFFFFFF;
3354 if (instance_bank == 0x3FF)
3355 instance_bank = 0xFFFFFFFF;
3356 use_bank = 1;
3357 } else {
3358 use_bank = 0;
3359 }
3360
801a6aa9 3361 *pos &= (1UL << 22) - 1;
394fdde2
TSD
3362
3363 if (use_bank) {
3364 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3365 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3366 return -EINVAL;
3367 mutex_lock(&adev->grbm_idx_mutex);
3368 amdgpu_gfx_select_se_sh(adev, se_bank,
3369 sh_bank, instance_bank);
3370 }
3371
3372 if (pm_pg_lock)
3373 mutex_lock(&adev->pm.mutex);
3374
d38ceaf9
AD
3375 while (size) {
3376 uint32_t value;
3377
3378 if (*pos > adev->rmmio_size)
3379 return result;
3380
3381 r = get_user(value, (uint32_t *)buf);
3382 if (r)
3383 return r;
3384
3385 WREG32(*pos >> 2, value);
3386
3387 result += 4;
3388 buf += 4;
3389 *pos += 4;
3390 size -= 4;
3391 }
3392
394fdde2
TSD
3393 if (use_bank) {
3394 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3395 mutex_unlock(&adev->grbm_idx_mutex);
3396 }
3397
3398 if (pm_pg_lock)
3399 mutex_unlock(&adev->pm.mutex);
3400
d38ceaf9
AD
3401 return result;
3402}
3403
adcec288
TSD
3404static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3405 size_t size, loff_t *pos)
3406{
45063097 3407 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3408 ssize_t result = 0;
3409 int r;
3410
3411 if (size & 0x3 || *pos & 0x3)
3412 return -EINVAL;
3413
3414 while (size) {
3415 uint32_t value;
3416
3417 value = RREG32_PCIE(*pos >> 2);
3418 r = put_user(value, (uint32_t *)buf);
3419 if (r)
3420 return r;
3421
3422 result += 4;
3423 buf += 4;
3424 *pos += 4;
3425 size -= 4;
3426 }
3427
3428 return result;
3429}
3430
3431static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3432 size_t size, loff_t *pos)
3433{
45063097 3434 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3435 ssize_t result = 0;
3436 int r;
3437
3438 if (size & 0x3 || *pos & 0x3)
3439 return -EINVAL;
3440
3441 while (size) {
3442 uint32_t value;
3443
3444 r = get_user(value, (uint32_t *)buf);
3445 if (r)
3446 return r;
3447
3448 WREG32_PCIE(*pos >> 2, value);
3449
3450 result += 4;
3451 buf += 4;
3452 *pos += 4;
3453 size -= 4;
3454 }
3455
3456 return result;
3457}
3458
3459static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3460 size_t size, loff_t *pos)
3461{
45063097 3462 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3463 ssize_t result = 0;
3464 int r;
3465
3466 if (size & 0x3 || *pos & 0x3)
3467 return -EINVAL;
3468
3469 while (size) {
3470 uint32_t value;
3471
3472 value = RREG32_DIDT(*pos >> 2);
3473 r = put_user(value, (uint32_t *)buf);
3474 if (r)
3475 return r;
3476
3477 result += 4;
3478 buf += 4;
3479 *pos += 4;
3480 size -= 4;
3481 }
3482
3483 return result;
3484}
3485
3486static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3487 size_t size, loff_t *pos)
3488{
45063097 3489 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3490 ssize_t result = 0;
3491 int r;
3492
3493 if (size & 0x3 || *pos & 0x3)
3494 return -EINVAL;
3495
3496 while (size) {
3497 uint32_t value;
3498
3499 r = get_user(value, (uint32_t *)buf);
3500 if (r)
3501 return r;
3502
3503 WREG32_DIDT(*pos >> 2, value);
3504
3505 result += 4;
3506 buf += 4;
3507 *pos += 4;
3508 size -= 4;
3509 }
3510
3511 return result;
3512}
3513
3514static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3515 size_t size, loff_t *pos)
3516{
45063097 3517 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3518 ssize_t result = 0;
3519 int r;
3520
3521 if (size & 0x3 || *pos & 0x3)
3522 return -EINVAL;
3523
3524 while (size) {
3525 uint32_t value;
3526
6fc0deaf 3527 value = RREG32_SMC(*pos);
adcec288
TSD
3528 r = put_user(value, (uint32_t *)buf);
3529 if (r)
3530 return r;
3531
3532 result += 4;
3533 buf += 4;
3534 *pos += 4;
3535 size -= 4;
3536 }
3537
3538 return result;
3539}
3540
3541static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3542 size_t size, loff_t *pos)
3543{
45063097 3544 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3545 ssize_t result = 0;
3546 int r;
3547
3548 if (size & 0x3 || *pos & 0x3)
3549 return -EINVAL;
3550
3551 while (size) {
3552 uint32_t value;
3553
3554 r = get_user(value, (uint32_t *)buf);
3555 if (r)
3556 return r;
3557
6fc0deaf 3558 WREG32_SMC(*pos, value);
adcec288
TSD
3559
3560 result += 4;
3561 buf += 4;
3562 *pos += 4;
3563 size -= 4;
3564 }
3565
3566 return result;
3567}
3568
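/* dump a versioned snapshot of the GFX configuration (plus, from rev 1 on,
 * the rev/pg/cg flags, family and PCI IDs) as an array of u32 values */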
1e051413
TSD
3569static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3570 size_t size, loff_t *pos)
3571{
45063097 3572 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3573 ssize_t result = 0;
3574 int r;
3575 uint32_t *config, no_regs = 0;
3576
3577 if (size & 0x3 || *pos & 0x3)
3578 return -EINVAL;
3579
ecab7668 3580 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
1e051413
TSD
3581 if (!config)
3582 return -ENOMEM;
3583
3584 /* version, increment each time something is added */
9a999359 3585 config[no_regs++] = 3;
1e051413
TSD
3586 config[no_regs++] = adev->gfx.config.max_shader_engines;
3587 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3588 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3589 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3590 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3591 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3592 config[no_regs++] = adev->gfx.config.max_gprs;
3593 config[no_regs++] = adev->gfx.config.max_gs_threads;
3594 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3595 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3596 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3597 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3598 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3599 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3600 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3601 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3602 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3603 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3604 config[no_regs++] = adev->gfx.config.num_gpus;
3605 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3606 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3607 config[no_regs++] = adev->gfx.config.gb_addr_config;
3608 config[no_regs++] = adev->gfx.config.num_rbs;
3609
89a8f309
TSD
3610 /* rev==1 */
3611 config[no_regs++] = adev->rev_id;
3612 config[no_regs++] = adev->pg_flags;
3613 config[no_regs++] = adev->cg_flags;
3614
e9f11dc8
TSD
3615 /* rev==2 */
3616 config[no_regs++] = adev->family;
3617 config[no_regs++] = adev->external_rev_id;
3618
9a999359
TSD
3619 /* rev==3 */
3620 config[no_regs++] = adev->pdev->device;
3621 config[no_regs++] = adev->pdev->revision;
3622 config[no_regs++] = adev->pdev->subsystem_device;
3623 config[no_regs++] = adev->pdev->subsystem_vendor;
3624
1e051413
TSD
3625 while (size && (*pos < no_regs * 4)) {
3626 uint32_t value;
3627
3628 value = config[*pos >> 2];
3629 r = put_user(value, (uint32_t *)buf);
3630 if (r) {
3631 kfree(config);
3632 return r;
3633 }
3634
3635 result += 4;
3636 buf += 4;
3637 *pos += 4;
3638 size -= 4;
3639 }
3640
3641 kfree(config);
3642 return result;
3643}
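/*
 * amdgpu_gca_config emits a flat array of 32-bit words: word 0 is a format
 * version (3 at this point) and each later revision only appends fields,
 * so older parsers keep working.  A consumer can read the whole file into
 * a uint32_t array and index it in the order filled in above, e.g.
 * cfg[0] = version, cfg[1] = max_shader_engines, cfg[2] = max_tile_pipes,
 * and so on.
 */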
3644
f2cdaf20
TSD
3645static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3646 size_t size, loff_t *pos)
3647{
45063097 3648 struct amdgpu_device *adev = file_inode(f)->i_private;
9f8df7d7
TSD
3649 int idx, x, outsize, r, valuesize;
3650 uint32_t values[16];
f2cdaf20 3651
9f8df7d7 3652 if (size & 3 || *pos & 0x3)
f2cdaf20
TSD
3653 return -EINVAL;
3654
3cbc614f
SP
3655 if (amdgpu_dpm == 0)
3656 return -EINVAL;
3657
f2cdaf20
TSD
3658 /* convert offset to sensor number */
3659 idx = *pos >> 2;
3660
9f8df7d7 3661 valuesize = sizeof(values);
f2cdaf20 3662 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
cd4d7464 3663 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
f2cdaf20
TSD
3664 else
3665 return -EINVAL;
3666
9f8df7d7
TSD
3667 if (size > valuesize)
3668 return -EINVAL;
3669
3670 outsize = 0;
3671 x = 0;
3672 if (!r) {
3673 while (size) {
3674 r = put_user(values[x++], (int32_t *)buf);
3675 buf += 4;
3676 size -= 4;
3677 outsize += 4;
3678 }
3679 }
f2cdaf20 3680
9f8df7d7 3681 return !r ? outsize : r;
f2cdaf20 3682}
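/*
 * amdgpu_sensors treats the file offset as a sensor index (*pos >> 2) and
 * returns whatever 32-bit values the powerplay read_sensor callback
 * provides for that sensor.  Reads fail with -EINVAL when amdgpu_dpm is
 * disabled or when no read_sensor implementation is registered.
 */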
1e051413 3683
273d7aa1
TSD
3684static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3685 size_t size, loff_t *pos)
3686{
3687 struct amdgpu_device *adev = f->f_inode->i_private;
3688 int r, x;
3689 	ssize_t result = 0;
472259f0 3690 uint32_t offset, se, sh, cu, wave, simd, data[32];
273d7aa1
TSD
3691
3692 if (size & 3 || *pos & 3)
3693 return -EINVAL;
3694
3695 /* decode offset */
0b968650
TSD
3696 offset = (*pos & GENMASK_ULL(6, 0));
3697 se = (*pos & GENMASK_ULL(14, 7)) >> 7;
3698 sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
3699 cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
3700 wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
3701 simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
273d7aa1
TSD
3702
3703 /* switch to the specific se/sh/cu */
3704 mutex_lock(&adev->grbm_idx_mutex);
3705 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3706
3707 x = 0;
472259f0
TSD
3708 if (adev->gfx.funcs->read_wave_data)
3709 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
273d7aa1
TSD
3710
3711 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3712 mutex_unlock(&adev->grbm_idx_mutex);
3713
5ecfb3b8
TSD
3714 if (!x)
3715 return -EINVAL;
3716
472259f0 3717 while (size && (offset < x * 4)) {
273d7aa1
TSD
3718 uint32_t value;
3719
472259f0 3720 value = data[offset >> 2];
273d7aa1
TSD
3721 r = put_user(value, (uint32_t *)buf);
3722 if (r)
3723 return r;
3724
3725 result += 4;
3726 buf += 4;
472259f0 3727 offset += 4;
273d7aa1
TSD
3728 size -= 4;
3729 }
3730
3731 return result;
3732}
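/*
 * For amdgpu_wave the file offset doubles as a packed selector, decoded by
 * the GENMASK_ULL() lines above:
 *   bits  6:0   byte offset into the returned wave data
 *   bits 14:7   shader engine (se)
 *   bits 22:15  shader array  (sh)
 *   bits 30:23  compute unit  (cu)
 *   bits 36:31  wave
 *   bits 44:37  simd
 * A hypothetical userspace helper could pack such a selector for lseek();
 * the name and shape below are illustrative only, not part of the driver:
 *
 *   static uint64_t wave_pos(unsigned se, unsigned sh, unsigned cu,
 *                            unsigned wave, unsigned simd)
 *   {
 *           return ((uint64_t)se << 7) | ((uint64_t)sh << 15) |
 *                  ((uint64_t)cu << 23) | ((uint64_t)wave << 31) |
 *                  ((uint64_t)simd << 37);
 *   }
 */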
3733
c5a60ce8
TSD
3734static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3735 size_t size, loff_t *pos)
3736{
3737 struct amdgpu_device *adev = f->f_inode->i_private;
3738 int r;
3739 ssize_t result = 0;
3740 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3741
3742 if (size & 3 || *pos & 3)
3743 return -EINVAL;
3744
3745 /* decode offset */
0b968650
TSD
3746 offset = *pos & GENMASK_ULL(11, 0);
3747 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
3748 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
3749 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
3750 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
3751 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
3752 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
3753 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
c5a60ce8
TSD
3754
3755 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3756 if (!data)
3757 return -ENOMEM;
3758
3759 /* switch to the specific se/sh/cu */
3760 mutex_lock(&adev->grbm_idx_mutex);
3761 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3762
3763 if (bank == 0) {
3764 if (adev->gfx.funcs->read_wave_vgprs)
3765 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3766 } else {
3767 if (adev->gfx.funcs->read_wave_sgprs)
3768 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3769 }
3770
3771 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3772 mutex_unlock(&adev->grbm_idx_mutex);
3773
3774 while (size) {
3775 uint32_t value;
3776
3777 		value = data[result >> 2];
3778 r = put_user(value, (uint32_t *)buf);
3779 if (r) {
3780 result = r;
3781 goto err;
3782 }
3783
3784 result += 4;
3785 buf += 4;
3786 size -= 4;
3787 }
3788
3789err:
3790 kfree(data);
3791 return result;
3792}
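/*
 * amdgpu_gpr uses the same packed-offset idea with a wider layout, per the
 * GENMASK_ULL() decoding above:
 *   bits 11:0   starting register offset passed to the read callback
 *   bits 19:12  shader engine (se)
 *   bits 27:20  shader array  (sh)
 *   bits 35:28  compute unit  (cu)
 *   bits 43:36  wave
 *   bits 51:44  simd
 *   bits 59:52  thread
 *   bits 61:60  bank: 0 selects VGPRs (read_wave_vgprs), anything else
 *               selects SGPRs (read_wave_sgprs)
 */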
3793
d38ceaf9
AD
3794static const struct file_operations amdgpu_debugfs_regs_fops = {
3795 .owner = THIS_MODULE,
3796 .read = amdgpu_debugfs_regs_read,
3797 .write = amdgpu_debugfs_regs_write,
3798 .llseek = default_llseek
3799};
adcec288
TSD
3800static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3801 .owner = THIS_MODULE,
3802 .read = amdgpu_debugfs_regs_didt_read,
3803 .write = amdgpu_debugfs_regs_didt_write,
3804 .llseek = default_llseek
3805};
3806static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3807 .owner = THIS_MODULE,
3808 .read = amdgpu_debugfs_regs_pcie_read,
3809 .write = amdgpu_debugfs_regs_pcie_write,
3810 .llseek = default_llseek
3811};
3812static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3813 .owner = THIS_MODULE,
3814 .read = amdgpu_debugfs_regs_smc_read,
3815 .write = amdgpu_debugfs_regs_smc_write,
3816 .llseek = default_llseek
3817};
3818
1e051413
TSD
3819static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3820 .owner = THIS_MODULE,
3821 .read = amdgpu_debugfs_gca_config_read,
3822 .llseek = default_llseek
3823};
3824
f2cdaf20
TSD
3825static const struct file_operations amdgpu_debugfs_sensors_fops = {
3826 .owner = THIS_MODULE,
3827 .read = amdgpu_debugfs_sensor_read,
3828 .llseek = default_llseek
3829};
3830
273d7aa1
TSD
3831static const struct file_operations amdgpu_debugfs_wave_fops = {
3832 .owner = THIS_MODULE,
3833 .read = amdgpu_debugfs_wave_read,
3834 .llseek = default_llseek
3835};
c5a60ce8
TSD
3836static const struct file_operations amdgpu_debugfs_gpr_fops = {
3837 .owner = THIS_MODULE,
3838 .read = amdgpu_debugfs_gpr_read,
3839 .llseek = default_llseek
3840};
273d7aa1 3841
adcec288
TSD
3842static const struct file_operations *debugfs_regs[] = {
3843 &amdgpu_debugfs_regs_fops,
3844 &amdgpu_debugfs_regs_didt_fops,
3845 &amdgpu_debugfs_regs_pcie_fops,
3846 &amdgpu_debugfs_regs_smc_fops,
1e051413 3847 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3848 &amdgpu_debugfs_sensors_fops,
273d7aa1 3849 &amdgpu_debugfs_wave_fops,
c5a60ce8 3850 &amdgpu_debugfs_gpr_fops,
adcec288
TSD
3851};
3852
3853static const char *debugfs_regs_names[] = {
3854 "amdgpu_regs",
3855 "amdgpu_regs_didt",
3856 "amdgpu_regs_pcie",
3857 "amdgpu_regs_smc",
1e051413 3858 "amdgpu_gca_config",
f2cdaf20 3859 "amdgpu_sensors",
273d7aa1 3860 "amdgpu_wave",
c5a60ce8 3861 "amdgpu_gpr",
adcec288 3862};
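/*
 * debugfs_regs[] and debugfs_regs_names[] are parallel arrays: entry i of
 * one must correspond to entry i of the other, since the init loop below
 * pairs them by index when creating the debugfs files.
 */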
d38ceaf9
AD
3863
3864static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3865{
3866 struct drm_minor *minor = adev->ddev->primary;
3867 struct dentry *ent, *root = minor->debugfs_root;
adcec288
TSD
3868 unsigned i, j;
3869
3870 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3871 ent = debugfs_create_file(debugfs_regs_names[i],
3872 S_IFREG | S_IRUGO, root,
3873 adev, debugfs_regs[i]);
3874 if (IS_ERR(ent)) {
3875 for (j = 0; j < i; j++) {
3876 			debugfs_remove(adev->debugfs_regs[j]);
3877 			adev->debugfs_regs[j] = NULL;
3878 }
3879 return PTR_ERR(ent);
3880 }
d38ceaf9 3881
adcec288
TSD
3882 if (!i)
3883 i_size_write(ent->d_inode, adev->rmmio_size);
3884 adev->debugfs_regs[i] = ent;
3885 }
d38ceaf9
AD
3886
3887 return 0;
3888}
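/*
 * The files above are created under the primary DRM minor's debugfs root,
 * which typically appears as /sys/kernel/debug/dri/<minor>/ (the exact
 * minor number is system dependent).  Only the first entry, amdgpu_regs,
 * gets an explicit i_size: it is set to adev->rmmio_size, i.e. the size of
 * the MMIO register aperture that file exposes.
 */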
3889
3890static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3891{
adcec288
TSD
3892 unsigned i;
3893
3894 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3895 if (adev->debugfs_regs[i]) {
3896 debugfs_remove(adev->debugfs_regs[i]);
3897 adev->debugfs_regs[i] = NULL;
3898 }
3899 }
d38ceaf9
AD
3900}
3901
4f0955fc
HR
3902static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3903{
3904 struct drm_info_node *node = (struct drm_info_node *) m->private;
3905 struct drm_device *dev = node->minor->dev;
3906 struct amdgpu_device *adev = dev->dev_private;
3907 int r = 0, i;
3908
3909 /* hold on the scheduler */
3910 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3911 struct amdgpu_ring *ring = adev->rings[i];
3912
3913 if (!ring || !ring->sched.thread)
3914 continue;
3915 kthread_park(ring->sched.thread);
3916 }
3917
3918 seq_printf(m, "run ib test:\n");
3919 r = amdgpu_ib_ring_tests(adev);
3920 if (r)
3921 seq_printf(m, "ib ring tests failed (%d).\n", r);
3922 else
3923 seq_printf(m, "ib ring tests passed.\n");
3924
3925 /* go on the scheduler */
3926 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3927 struct amdgpu_ring *ring = adev->rings[i];
3928
3929 if (!ring || !ring->sched.thread)
3930 continue;
3931 kthread_unpark(ring->sched.thread);
3932 }
3933
3934 return 0;
3935}
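/*
 * Reading the amdgpu_test_ib entry first parks every ring's scheduler
 * kthread so no new jobs are pushed while the test runs, then executes
 * amdgpu_ib_ring_tests() and reports pass/fail based on its return code,
 * and finally unparks the schedulers again.
 */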
3936
3937static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3938 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3939};
3940
3941static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3942{
3943 return amdgpu_debugfs_add_files(adev,
3944 amdgpu_debugfs_test_ib_ring_list, 1);
3945}
3946
d38ceaf9
AD
3947int amdgpu_debugfs_init(struct drm_minor *minor)
3948{
3949 return 0;
3950}
db95e218
KR
3951
3952static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
3953{
3954 struct drm_info_node *node = (struct drm_info_node *) m->private;
3955 struct drm_device *dev = node->minor->dev;
3956 struct amdgpu_device *adev = dev->dev_private;
3957
3958 seq_write(m, adev->bios, adev->bios_size);
3959 return 0;
3960}
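/*
 * amdgpu_vbios simply dumps the raw VBIOS image (adev->bios, adev->bios_size)
 * through a seq_file.  A hypothetical way to capture it from userspace,
 * again assuming DRM minor 0:
 *
 *   cat /sys/kernel/debug/dri/0/amdgpu_vbios > vbios.rom
 */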
3961
db95e218
KR
3962static const struct drm_info_list amdgpu_vbios_dump_list[] = {
3963 {"amdgpu_vbios",
3964 amdgpu_debugfs_get_vbios_dump,
3965 0, NULL},
3966};
3967
db95e218
KR
3968static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3969{
3970 return amdgpu_debugfs_add_files(adev,
3971 amdgpu_vbios_dump_list, 1);
3972}
7cebc728 3973#else
27bad5b9 3974static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
4f0955fc
HR
3975{
3976 return 0;
3977}
7cebc728
AK
3978static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3979{
3980 return 0;
3981}
db95e218
KR
3982static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3983{
3984 return 0;
3985}
7cebc728 3986static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 3987#endif