/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

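/*
 * VRAM scratch helpers: allocate and free a single GPU page in VRAM
 * (adev->vram_scratch), set up just before GMC hw init in amdgpu_init().
 */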
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helpers function.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb >> 3, adev->wb.used);
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in a case where real_vram_size is inferior to mc_vram_size (i.e.
 * not affected by the bogus hw of Novell bug 204882 along with lots of
 * ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		mc->gart_start = mc->vram_end + 1;
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free fw reserved vram if it has been reserved.
 */
void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}

/**
 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * Create bo vram reservation from fw.
 */
int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	u64 gpu_addr;
	u64 vram_size = adev->mc.visible_vram_size;
	u64 offset = adev->fw_vram_usage.start_offset;
	u64 size = adev->fw_vram_usage.size;
	struct amdgpu_bo *bo;

	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size > 0 &&
		adev->fw_vram_usage.size <= vram_size) {

		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
			PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
			&adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;

		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
		if (r)
			goto error_reserve;

		/* remove the original mem node and create a new one at the
		 * request position
		 */
		bo = adev->fw_vram_usage.reserved_bo;
		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem,
				     false, false);
		if (r)
			goto error_pin;

		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			adev->fw_vram_usage.start_offset,
			(adev->fw_vram_usage.start_offset +
			adev->fw_vram_usage.size), &gpu_addr);
		if (r)
			goto error_pin;
		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
			&adev->fw_vram_usage.va);
		if (r)
			goto error_kmap;

		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
	}
	return r;

error_kmap:
	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;
	return r;
}

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	u16 cmd;
	int r;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if needed or false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after a VM
		 * reboot some old smc fw still needs the driver to do vPost, otherwise
		 * the gpu hangs; smc fw versions above 22.15 don't have this flaw, so
		 * we force vPost for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

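/* sysfs attribute exposing the VBIOS version string from the ATOM context */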
static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct atom_context *ctx = adev->mode_info.atom_context;

	return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
}

static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
		   NULL);

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
	device_remove_file(adev->dev, &dev_attr_vbios_version);
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
	int ret;

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}

	ret = device_create_file(adev->dev, &dev_attr_vbios_version);
	if (ret) {
		DRM_ERROR("Failed to create device file for VBIOS version\n");
		return ret;
	}

	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

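/* Validate the amdgpu_vm_block_size module parameter; reset it to the
 * default (-1) if it is out of the supported range. */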
static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

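/* Validate the amdgpu_vm_size module parameter; reset it to the default (-1)
 * if it is not a power of two or outside the supported range. */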
static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (!is_power_of_2(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_check_vm_size(adev);

	amdgpu_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

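/*
 * IP block helpers: forward clockgating/powergating state changes and idle
 * queries to every IP block of the requested type that implements the
 * corresponding callback.
 */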
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
					     enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

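/*
 * Parse the amdgpu_virtual_display module parameter ("all" or a list of
 * PCI addresses, each with an optional CRTC count) and enable virtual
 * display for this device when it matches.
 */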
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

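/*
 * Load and validate the per-ASIC gpu_info firmware (vega10/raven) and use it
 * to fill in the gfx config and CU info; ASICs without such firmware simply
 * return 0.
 */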
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

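/*
 * Early init: select the ASIC family, register its IP blocks, parse the
 * gpu_info firmware and run each block's early_init callback, honoring the
 * amdgpu_ip_block_mask module parameter.
 */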
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

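/*
 * Main init: run sw_init for all IP blocks, bring the GMC block up early
 * (VRAM scratch, writeback, SR-IOV CSA), then hw_init the remaining blocks
 * and finally initialize amdkfd.
 */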
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	amdgpu_amdkfd_device_init(adev);
	return 0;
}

0c49e0b8
CZ
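/* record the first AMDGPU_RESET_MAGIC_NUM bytes of the GART table so a
 * later compare can tell whether VRAM contents survived a GPU reset */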
1796static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1797{
1798 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1799}
1800
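/* returns true if the saved reset magic no longer matches the GART table,
 * i.e. VRAM contents were lost across the reset */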
1801static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1802{
1803 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1804 AMDGPU_RESET_MAGIC_NUM);
1805}
1806
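/*
 * amdgpu_late_set_cg_state - enable clockgating on all valid IP blocks
 *
 * UVD and VCE are skipped because their clockgating is handled specially;
 * every other block is switched to AMD_CG_STATE_GATE to save power.
 */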
2dc80b00 1807static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1808{
1809 int i = 0, r;
1810
1811 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1812 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1813 continue;
4a446d55 1814 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1815 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1816 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1817 /* enable clockgating to save power */
a1255107
AD
1818 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1819 AMD_CG_STATE_GATE);
4a446d55
AD
1820 if (r) {
1821 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1822 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1823 return r;
1824 }
b0b00ff1 1825 }
d38ceaf9 1826 }
2dc80b00
S
1827 return 0;
1828}
1829
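/*
 * amdgpu_late_init - run late_init for all valid IP blocks
 *
 * Also schedules the delayed clockgating work and records the reset magic
 * used later to detect VRAM loss after a GPU reset.
 */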
1830static int amdgpu_late_init(struct amdgpu_device *adev)
1831{
1832 int i = 0, r;
1833
1834 for (i = 0; i < adev->num_ip_blocks; i++) {
1835 if (!adev->ip_blocks[i].status.valid)
1836 continue;
1837 if (adev->ip_blocks[i].version->funcs->late_init) {
1838 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1839 if (r) {
1840 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1841 adev->ip_blocks[i].version->funcs->name, r);
1842 return r;
1843 }
1844 adev->ip_blocks[i].status.late_initialized = true;
1845 }
1846 }
1847
1848 mod_delayed_work(system_wq, &adev->late_init_work,
1849 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1850
0c49e0b8 1851 amdgpu_fill_reset_magic(adev);
d38ceaf9
AD
1852
1853 return 0;
1854}
1855
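/*
 * amdgpu_fini - tear down all IP blocks in reverse order
 *
 * Tears down amdkfd, shuts the SMC block down first (after ungating it),
 * then runs hw_fini, sw_fini and late_fini on the remaining blocks in
 * reverse order, and releases the GPU back to the host for SR-IOV.
 */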
1856static int amdgpu_fini(struct amdgpu_device *adev)
1857{
1858 int i, r;
1859
1884734a 1860 amdgpu_amdkfd_device_fini(adev);
3e96dbfd
AD
1861 /* need to disable SMC first */
1862 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1863 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1864 continue;
a1255107 1865 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1866 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1867 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1868 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1869 if (r) {
1870 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1871 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1872 return r;
1873 }
a1255107 1874 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1875 /* XXX handle errors */
1876 if (r) {
1877 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1878 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1879 }
a1255107 1880 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1881 break;
1882 }
1883 }
1884
d38ceaf9 1885 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1886 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1887 continue;
a1255107 1888 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9
AD
1889 amdgpu_wb_fini(adev);
1890 amdgpu_vram_scratch_fini(adev);
1891 }
8201a67a
RZ
1892
1893 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1894 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1895 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1896 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1897 AMD_CG_STATE_UNGATE);
1898 if (r) {
1899 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1900 adev->ip_blocks[i].version->funcs->name, r);
1901 return r;
1902 }
2c1a2784 1903 }
8201a67a 1904
a1255107 1905 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1906 /* XXX handle errors */
2c1a2784 1907 if (r) {
a1255107
AD
1908 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1909 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1910 }
8201a67a 1911
a1255107 1912 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1913 }
1914
1915 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1916 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1917 continue;
a1255107 1918 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1919 /* XXX handle errors */
2c1a2784 1920 if (r) {
a1255107
AD
1921 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1922 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1923 }
a1255107
AD
1924 adev->ip_blocks[i].status.sw = false;
1925 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1926 }
1927
a6dcfd9c 1928 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1929 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1930 continue;
a1255107
AD
1931 if (adev->ip_blocks[i].version->funcs->late_fini)
1932 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1933 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1934 }
1935
030308fc 1936 if (amdgpu_sriov_vf(adev))
3149d9da 1937 amdgpu_virt_release_full_gpu(adev, false);
2493664f 1938
d38ceaf9
AD
1939 return 0;
1940}
1941
2dc80b00
S
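/* deferred work handler: enables clockgating once late init has settled */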
1942static void amdgpu_late_init_func_handler(struct work_struct *work)
1943{
1944 struct amdgpu_device *adev =
1945 container_of(work, struct amdgpu_device, late_init_work.work);
1946 amdgpu_late_set_cg_state(adev);
1947}
1948
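/*
 * amdgpu_suspend - suspend all valid IP blocks in reverse order
 *
 * Used both for system suspend and before a full GPU reset. The SMC block
 * (and then every other block) is ungated first so the per-block suspend
 * callbacks can shut the hardware down safely.
 */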
faefba95 1949int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1950{
1951 int i, r;
1952
e941ea99
XY
1953 if (amdgpu_sriov_vf(adev))
1954 amdgpu_virt_request_full_gpu(adev, false);
1955
c5a93a28
FC
1956 /* ungate SMC block first */
1957 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1958 AMD_CG_STATE_UNGATE);
1959 if (r) {
1960 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1961 }
1962
d38ceaf9 1963 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1964 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1965 continue;
1966 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1967 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1968 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1969 AMD_CG_STATE_UNGATE);
c5a93a28 1970 if (r) {
a1255107
AD
1971 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1972 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1973 }
2c1a2784 1974 }
d38ceaf9 1975 /* XXX handle errors */
a1255107 1976 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1977 /* XXX handle errors */
2c1a2784 1978 if (r) {
a1255107
AD
1979 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1980 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1981 }
d38ceaf9
AD
1982 }
1983
e941ea99
XY
1984 if (amdgpu_sriov_vf(adev))
1985 amdgpu_virt_release_full_gpu(adev, false);
1986
d38ceaf9
AD
1987 return 0;
1988}
1989
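/*
 * amdgpu_sriov_reinit_early - re-init GMC, COMMON and IH after a VF reset
 *
 * Re-runs hw_init on the blocks listed in ip_order[] so that GPU memory and
 * interrupts work again before the remaining blocks are brought back.
 */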
e4f0fdcc 1990static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1991{
1992 int i, r;
1993
2cb681b6
ML
1994 static enum amd_ip_block_type ip_order[] = {
1995 AMD_IP_BLOCK_TYPE_GMC,
1996 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1997 AMD_IP_BLOCK_TYPE_IH,
1998 };
a90ad3c2 1999
2cb681b6
ML
2000 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2001 int j;
2002 struct amdgpu_ip_block *block;
a90ad3c2 2003
2cb681b6
ML
2004 for (j = 0; j < adev->num_ip_blocks; j++) {
2005 block = &adev->ip_blocks[j];
2006
2007 if (block->version->type != ip_order[i] ||
2008 !block->status.valid)
2009 continue;
2010
2011 r = block->version->funcs->hw_init(adev);
2012 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
a90ad3c2
ML
2013 }
2014 }
2015
2016 return 0;
2017}
2018
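/*
 * amdgpu_sriov_reinit_late - re-init the remaining IP blocks after a VF reset
 *
 * Re-runs hw_init on SMC, PSP, DCE, GFX, SDMA, UVD and VCE in that order,
 * once the early phase has already restored GMC, COMMON and IH.
 */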
e4f0fdcc 2019static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
2020{
2021 int i, r;
2022
2cb681b6
ML
2023 static enum amd_ip_block_type ip_order[] = {
2024 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 2025 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
2026 AMD_IP_BLOCK_TYPE_DCE,
2027 AMD_IP_BLOCK_TYPE_GFX,
2028 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
2029 AMD_IP_BLOCK_TYPE_UVD,
2030 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 2031 };
a90ad3c2 2032
2cb681b6
ML
2033 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2034 int j;
2035 struct amdgpu_ip_block *block;
a90ad3c2 2036
2cb681b6
ML
2037 for (j = 0; j < adev->num_ip_blocks; j++) {
2038 block = &adev->ip_blocks[j];
2039
2040 if (block->version->type != ip_order[i] ||
2041 !block->status.valid)
2042 continue;
2043
2044 r = block->version->funcs->hw_init(adev);
2045 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
a90ad3c2
ML
2046 }
2047 }
2048
2049 return 0;
2050}
2051
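/* phase 1 of resume: bring COMMON, GMC and IH back first so that GPU
 * memory and interrupts are usable before the other blocks resume */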
fcf0649f 2052static int amdgpu_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
2053{
2054 int i, r;
2055
a90ad3c2
ML
2056 for (i = 0; i < adev->num_ip_blocks; i++) {
2057 if (!adev->ip_blocks[i].status.valid)
2058 continue;
a90ad3c2
ML
2059 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2060 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
2061 adev->ip_blocks[i].version->type ==
2062 AMD_IP_BLOCK_TYPE_IH) {
2063 r = adev->ip_blocks[i].version->funcs->resume(adev);
2064 if (r) {
2065 DRM_ERROR("resume of IP block <%s> failed %d\n",
2066 adev->ip_blocks[i].version->funcs->name, r);
2067 return r;
2068 }
a90ad3c2
ML
2069 }
2070 }
2071
2072 return 0;
2073}
2074
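/* phase 2 of resume: resume every remaining valid IP block, skipping the
 * COMMON, GMC and IH blocks that phase 1 already handled */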
fcf0649f 2075static int amdgpu_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2076{
2077 int i, r;
2078
2079 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2080 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2081 continue;
fcf0649f
CZ
2082 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2083 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2084 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
2085 continue;
a1255107 2086 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2087 if (r) {
a1255107
AD
2088 DRM_ERROR("resume of IP block <%s> failed %d\n",
2089 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2090 return r;
2c1a2784 2091 }
d38ceaf9
AD
2092 }
2093
2094 return 0;
2095}
2096
fcf0649f
CZ
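/* full resume: run phase 1 (COMMON/GMC/IH) and then phase 2 (the rest) */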
2097static int amdgpu_resume(struct amdgpu_device *adev)
2098{
2099 int r;
2100
2101 r = amdgpu_resume_phase1(adev);
2102 if (r)
2103 return r;
2104 r = amdgpu_resume_phase2(adev);
2105
2106 return r;
2107}
2108
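/* detect whether the VF has an SR-IOV capable vBIOS; if not, record an
 * AMDGIM_ERROR_VF_NO_VBIOS error for the hypervisor */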
4e99a44e 2109static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2110{
6867e1b5
ML
2111 if (amdgpu_sriov_vf(adev)) {
2112 if (adev->is_atom_fw) {
2113 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2114 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2115 } else {
2116 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2117 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2118 }
2119
2120 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2121 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2122 }
048765ad
AR
2123}
2124
4562236b
HW
2125bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2126{
2127 switch (asic_type) {
2128#if defined(CONFIG_DRM_AMD_DC)
2129 case CHIP_BONAIRE:
2130 case CHIP_HAWAII:
0d6fbccb 2131 case CHIP_KAVERI:
4562236b
HW
2132 case CHIP_CARRIZO:
2133 case CHIP_STONEY:
2134 case CHIP_POLARIS11:
2135 case CHIP_POLARIS10:
2c8ad2d5 2136 case CHIP_POLARIS12:
4562236b
HW
2137 case CHIP_TONGA:
2138 case CHIP_FIJI:
2139#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2140 return amdgpu_dc != 0;
4562236b 2141#endif
17b7cf8c
AD
2142 case CHIP_KABINI:
2143 case CHIP_MULLINS:
2144 return amdgpu_dc > 0;
42f8ffa1
HW
2145 case CHIP_VEGA10:
2146#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2147 case CHIP_RAVEN:
42f8ffa1 2148#endif
fd187853 2149 return amdgpu_dc != 0;
4562236b
HW
2150#endif
2151 default:
2152 return false;
2153 }
2154}
2155
2156/**
2157 * amdgpu_device_has_dc_support - check if dc is supported
2158 *
2159 * @adev: amdgpu_device pointer
2160 *
2161 * Returns true for supported, false for not supported
2162 */
2163bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2164{
2555039d
XY
2165 if (amdgpu_sriov_vf(adev))
2166 return false;
2167
4562236b
HW
2168 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2169}
2170
d38ceaf9
AD
2171/**
2172 * amdgpu_device_init - initialize the driver
2173 *
2174 * @adev: amdgpu_device pointer
2175 * @ddev: drm dev pointer
2176 * @pdev: pci dev pointer
2177 * @flags: driver flags
2178 *
2179 * Initializes the driver info and hw (all asics).
2180 * Returns 0 for success or an error on failure.
2181 * Called at driver startup.
2182 */
2183int amdgpu_device_init(struct amdgpu_device *adev,
2184 struct drm_device *ddev,
2185 struct pci_dev *pdev,
2186 uint32_t flags)
2187{
2188 int r, i;
2189 bool runtime = false;
95844d20 2190 u32 max_MBps;
d38ceaf9
AD
2191
2192 adev->shutdown = false;
2193 adev->dev = &pdev->dev;
2194 adev->ddev = ddev;
2195 adev->pdev = pdev;
2196 adev->flags = flags;
2f7d10b3 2197 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2198 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
6f02a696 2199 adev->mc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2200 adev->accel_working = false;
2201 adev->num_rings = 0;
2202 adev->mman.buffer_funcs = NULL;
2203 adev->mman.buffer_funcs_ring = NULL;
2204 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2205 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 2206 adev->gart.gart_funcs = NULL;
f54d1867 2207 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2208 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2209
2210 adev->smc_rreg = &amdgpu_invalid_rreg;
2211 adev->smc_wreg = &amdgpu_invalid_wreg;
2212 adev->pcie_rreg = &amdgpu_invalid_rreg;
2213 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2214 adev->pciep_rreg = &amdgpu_invalid_rreg;
2215 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2216 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2217 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2218 adev->didt_rreg = &amdgpu_invalid_rreg;
2219 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2220 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2221 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2222 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2223 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2224
3e39ab90
AD
2225 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2226 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2227 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2228
2229 /* mutex initialization is all done here so we
2230 * can recall these functions without running into locking issues */
d38ceaf9 2231 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2232 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2233 mutex_init(&adev->pm.mutex);
2234 mutex_init(&adev->gfx.gpu_clock_mutex);
2235 mutex_init(&adev->srbm_mutex);
b8866c26 2236 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2237 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2238 mutex_init(&adev->mn_lock);
e23b74aa 2239 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 2240 hash_init(adev->mn_hash);
13a752e3 2241 mutex_init(&adev->lock_reset);
d38ceaf9
AD
2242
2243 amdgpu_check_arguments(adev);
2244
d38ceaf9
AD
2245 spin_lock_init(&adev->mmio_idx_lock);
2246 spin_lock_init(&adev->smc_idx_lock);
2247 spin_lock_init(&adev->pcie_idx_lock);
2248 spin_lock_init(&adev->uvd_ctx_idx_lock);
2249 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2250 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2251 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2252 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2253 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2254
0c4e7fa5
CZ
2255 INIT_LIST_HEAD(&adev->shadow_list);
2256 mutex_init(&adev->shadow_list_lock);
2257
795f2813
AR
2258 INIT_LIST_HEAD(&adev->ring_lru_list);
2259 spin_lock_init(&adev->ring_lru_list_lock);
2260
2dc80b00
S
2261 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2262
0fa49558
AX
2263 /* Registers mapping */
2264 /* TODO: block userspace mapping of io register */
da69c161
KW
2265 if (adev->asic_type >= CHIP_BONAIRE) {
2266 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2267 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2268 } else {
2269 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2270 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2271 }
d38ceaf9 2272
d38ceaf9
AD
2273 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2274 if (adev->rmmio == NULL) {
2275 return -ENOMEM;
2276 }
2277 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2278 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2279
705e519e
CK
2280 /* doorbell bar mapping */
2281 amdgpu_doorbell_init(adev);
d38ceaf9
AD
2282
2283 /* io port mapping */
2284 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2285 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2286 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2287 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2288 break;
2289 }
2290 }
2291 if (adev->rio_mem == NULL)
b64a18c5 2292 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
2293
2294 /* early init functions */
2295 r = amdgpu_early_init(adev);
2296 if (r)
2297 return r;
2298
2299 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2300 /* this will fail for cards that aren't VGA class devices, just
2301 * ignore it */
2302 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2303
2304 if (amdgpu_runtime_pm == 1)
2305 runtime = true;
e9bef455 2306 if (amdgpu_device_is_px(ddev))
d38ceaf9 2307 runtime = true;
84c8b22e
LW
2308 if (!pci_is_thunderbolt_attached(adev->pdev))
2309 vga_switcheroo_register_client(adev->pdev,
2310 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2311 if (runtime)
2312 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2313
2314 /* Read BIOS */
83ba126a
AD
2315 if (!amdgpu_get_bios(adev)) {
2316 r = -EINVAL;
2317 goto failed;
2318 }
f7e9e9fe 2319
d38ceaf9 2320 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2321 if (r) {
2322 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2323 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2324 goto failed;
2c1a2784 2325 }
d38ceaf9 2326
4e99a44e
ML
2327 /* detect if we are with an SRIOV vbios */
2328 amdgpu_device_detect_sriov_bios(adev);
048765ad 2329
d38ceaf9 2330 /* Post card if necessary */
91fe77eb 2331 if (amdgpu_need_post(adev)) {
d38ceaf9 2332 if (!adev->bios) {
bec86378 2333 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2334 r = -EINVAL;
2335 goto failed;
d38ceaf9 2336 }
bec86378 2337 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2338 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2339 if (r) {
2340 dev_err(adev->dev, "gpu post error!\n");
2341 goto failed;
2342 }
d38ceaf9
AD
2343 }
2344
88b64e95
AD
2345 if (adev->is_atom_fw) {
2346 /* Initialize clocks */
2347 r = amdgpu_atomfirmware_get_clock_info(adev);
2348 if (r) {
2349 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2350 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2351 goto failed;
2352 }
2353 } else {
a5bde2f9
AD
2354 /* Initialize clocks */
2355 r = amdgpu_atombios_get_clock_info(adev);
2356 if (r) {
2357 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2358 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2359 goto failed;
a5bde2f9
AD
2360 }
2361 /* init i2c buses */
4562236b
HW
2362 if (!amdgpu_device_has_dc_support(adev))
2363 amdgpu_atombios_i2c_init(adev);
2c1a2784 2364 }
d38ceaf9
AD
2365
2366 /* Fence driver */
2367 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2368 if (r) {
2369 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2370 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2371 goto failed;
2c1a2784 2372 }
d38ceaf9
AD
2373
2374 /* init the mode config */
2375 drm_mode_config_init(adev->ddev);
2376
2377 r = amdgpu_init(adev);
2378 if (r) {
8840a387 2379 /* failed in exclusive mode due to timeout */
2380 if (amdgpu_sriov_vf(adev) &&
2381 !amdgpu_sriov_runtime(adev) &&
2382 amdgpu_virt_mmio_blocked(adev) &&
2383 !amdgpu_virt_wait_reset(adev)) {
2384 dev_err(adev->dev, "VF exclusive mode timeout\n");
2385 r = -EAGAIN;
2386 goto failed;
2387 }
2c1a2784 2388 dev_err(adev->dev, "amdgpu_init failed\n");
e23b74aa 2389 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
d38ceaf9 2390 amdgpu_fini(adev);
83ba126a 2391 goto failed;
d38ceaf9
AD
2392 }
2393
2394 adev->accel_working = true;
2395
e59c0205
AX
2396 amdgpu_vm_check_compute_bug(adev);
2397
95844d20
MO
2398 /* Initialize the buffer migration limit. */
2399 if (amdgpu_moverate >= 0)
2400 max_MBps = amdgpu_moverate;
2401 else
2402 max_MBps = 8; /* Allow 8 MB/s. */
2403 /* Get a log2 for easy divisions. */
2404 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2405
d38ceaf9
AD
2406 r = amdgpu_ib_pool_init(adev);
2407 if (r) {
2408 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2409 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2410 goto failed;
d38ceaf9
AD
2411 }
2412
2413 r = amdgpu_ib_ring_tests(adev);
2414 if (r)
2415 DRM_ERROR("ib ring test failed (%d).\n", r);
2416
2dc8f81e
HC
2417 if (amdgpu_sriov_vf(adev))
2418 amdgpu_virt_init_data_exchange(adev);
2419
9bc92b9c
ML
2420 amdgpu_fbdev_init(adev);
2421
d2f52ac8
RZ
2422 r = amdgpu_pm_sysfs_init(adev);
2423 if (r)
2424 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2425
d38ceaf9 2426 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2427 if (r)
d38ceaf9 2428 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2429
2430 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2431 if (r)
d38ceaf9 2432 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2433
4f0955fc
HR
2434 r = amdgpu_debugfs_test_ib_ring_init(adev);
2435 if (r)
2436 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2437
50ab2533 2438 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2439 if (r)
50ab2533 2440 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2441
db95e218
KR
2442 r = amdgpu_debugfs_vbios_dump_init(adev);
2443 if (r)
2444 DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2445
d38ceaf9
AD
2446 if ((amdgpu_testing & 1)) {
2447 if (adev->accel_working)
2448 amdgpu_test_moves(adev);
2449 else
2450 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2451 }
d38ceaf9
AD
2452 if (amdgpu_benchmarking) {
2453 if (adev->accel_working)
2454 amdgpu_benchmark(adev, amdgpu_benchmarking);
2455 else
2456 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2457 }
2458
2459 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2460 * explicit gating rather than handling it automatically.
2461 */
2462 r = amdgpu_late_init(adev);
2c1a2784
AD
2463 if (r) {
2464 dev_err(adev->dev, "amdgpu_late_init failed\n");
e23b74aa 2465 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2466 goto failed;
2c1a2784 2467 }
d38ceaf9
AD
2468
2469 return 0;
83ba126a
AD
2470
2471failed:
89041940 2472 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2473 if (runtime)
2474 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2475
83ba126a 2476 return r;
d38ceaf9
AD
2477}
2478
d38ceaf9
AD
2479/**
2480 * amdgpu_device_fini - tear down the driver
2481 *
2482 * @adev: amdgpu_device pointer
2483 *
2484 * Tear down the driver info (all asics).
2485 * Called at driver shutdown.
2486 */
2487void amdgpu_device_fini(struct amdgpu_device *adev)
2488{
2489 int r;
2490
2491 DRM_INFO("amdgpu: finishing device.\n");
2492 adev->shutdown = true;
db2c2a97
PD
2493 if (adev->mode_info.mode_config_initialized)
2494 drm_crtc_force_disable_all(adev->ddev);
d38ceaf9
AD
2495 /* evict vram memory */
2496 amdgpu_bo_evict_vram(adev);
2497 amdgpu_ib_pool_fini(adev);
a05502e5 2498 amdgpu_fw_reserve_vram_fini(adev);
d38ceaf9
AD
2499 amdgpu_fence_driver_fini(adev);
2500 amdgpu_fbdev_fini(adev);
2501 r = amdgpu_fini(adev);
ab4fe3e1
HR
2502 if (adev->firmware.gpu_info_fw) {
2503 release_firmware(adev->firmware.gpu_info_fw);
2504 adev->firmware.gpu_info_fw = NULL;
2505 }
d38ceaf9 2506 adev->accel_working = false;
2dc80b00 2507 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2508 /* free i2c buses */
4562236b
HW
2509 if (!amdgpu_device_has_dc_support(adev))
2510 amdgpu_i2c_fini(adev);
d38ceaf9
AD
2511 amdgpu_atombios_fini(adev);
2512 kfree(adev->bios);
2513 adev->bios = NULL;
84c8b22e
LW
2514 if (!pci_is_thunderbolt_attached(adev->pdev))
2515 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2516 if (adev->flags & AMD_IS_PX)
2517 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2518 vga_client_register(adev->pdev, NULL, NULL, NULL);
2519 if (adev->rio_mem)
2520 pci_iounmap(adev->pdev, adev->rio_mem);
2521 adev->rio_mem = NULL;
2522 iounmap(adev->rmmio);
2523 adev->rmmio = NULL;
705e519e 2524 amdgpu_doorbell_fini(adev);
d2f52ac8 2525 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2526 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2527}
2528
2529
2530/*
2531 * Suspend & resume.
2532 */
2533/**
810ddc3a 2534 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2535 *
2536 * @dev: drm dev pointer
2537 * @suspend: true to put the device into a low power (D3hot) state
2538 *
2539 * Puts the hw in the suspend state (all asics).
2540 * Returns 0 for success or an error on failure.
2541 * Called at driver suspend.
2542 */
810ddc3a 2543int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2544{
2545 struct amdgpu_device *adev;
2546 struct drm_crtc *crtc;
2547 struct drm_connector *connector;
5ceb54c6 2548 int r;
d38ceaf9
AD
2549
2550 if (dev == NULL || dev->dev_private == NULL) {
2551 return -ENODEV;
2552 }
2553
2554 adev = dev->dev_private;
2555
2556 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2557 return 0;
2558
2559 drm_kms_helper_poll_disable(dev);
2560
4562236b
HW
2561 if (!amdgpu_device_has_dc_support(adev)) {
2562 /* turn off display hw */
2563 drm_modeset_lock_all(dev);
2564 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2565 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2566 }
2567 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2568 }
2569
ba997709
YZ
2570 amdgpu_amdkfd_suspend(adev);
2571
756e6880 2572 /* unpin the front buffers and cursors */
d38ceaf9 2573 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2574 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2575 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2576 struct amdgpu_bo *robj;
2577
756e6880
AD
2578 if (amdgpu_crtc->cursor_bo) {
2579 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2580 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2581 if (r == 0) {
2582 amdgpu_bo_unpin(aobj);
2583 amdgpu_bo_unreserve(aobj);
2584 }
2585 }
2586
d38ceaf9
AD
2587 if (rfb == NULL || rfb->obj == NULL) {
2588 continue;
2589 }
2590 robj = gem_to_amdgpu_bo(rfb->obj);
2591 /* don't unpin kernel fb objects */
2592 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2593 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2594 if (r == 0) {
2595 amdgpu_bo_unpin(robj);
2596 amdgpu_bo_unreserve(robj);
2597 }
2598 }
2599 }
2600 /* evict vram memory */
2601 amdgpu_bo_evict_vram(adev);
2602
5ceb54c6 2603 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2604
2605 r = amdgpu_suspend(adev);
2606
a0a71e49
AD
2607 /* evict remaining vram memory
2608 * This second call to evict vram is to evict the gart page table
2609 * using the CPU.
2610 */
d38ceaf9
AD
2611 amdgpu_bo_evict_vram(adev);
2612
d05da0e2 2613 amdgpu_atombios_scratch_regs_save(adev);
d38ceaf9
AD
2614 pci_save_state(dev->pdev);
2615 if (suspend) {
2616 /* Shut down the device */
2617 pci_disable_device(dev->pdev);
2618 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2619 } else {
2620 r = amdgpu_asic_reset(adev);
2621 if (r)
2622 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2623 }
2624
2625 if (fbcon) {
2626 console_lock();
2627 amdgpu_fbdev_set_suspend(adev, 1);
2628 console_unlock();
2629 }
2630 return 0;
2631}
2632
2633/**
810ddc3a 2634 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2635 *
2636 * @dev: drm dev pointer
2637 *
2638 * Bring the hw back to operating state (all asics).
2639 * Returns 0 for success or an error on failure.
2640 * Called at driver resume.
2641 */
810ddc3a 2642int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2643{
2644 struct drm_connector *connector;
2645 struct amdgpu_device *adev = dev->dev_private;
756e6880 2646 struct drm_crtc *crtc;
03161a6e 2647 int r = 0;
d38ceaf9
AD
2648
2649 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2650 return 0;
2651
74b0b157 2652 if (fbcon)
d38ceaf9 2653 console_lock();
74b0b157 2654
d38ceaf9
AD
2655 if (resume) {
2656 pci_set_power_state(dev->pdev, PCI_D0);
2657 pci_restore_state(dev->pdev);
74b0b157 2658 r = pci_enable_device(dev->pdev);
03161a6e
HR
2659 if (r)
2660 goto unlock;
d38ceaf9 2661 }
d05da0e2 2662 amdgpu_atombios_scratch_regs_restore(adev);
d38ceaf9
AD
2663
2664 /* post card */
c836fec5 2665 if (amdgpu_need_post(adev)) {
74b0b157 2666 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2667 if (r)
2668 DRM_ERROR("amdgpu asic init failed\n");
2669 }
d38ceaf9
AD
2670
2671 r = amdgpu_resume(adev);
e6707218 2672 if (r) {
ca198528 2673 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2674 goto unlock;
e6707218 2675 }
5ceb54c6
AD
2676 amdgpu_fence_driver_resume(adev);
2677
ca198528
FC
2678 if (resume) {
2679 r = amdgpu_ib_ring_tests(adev);
2680 if (r)
2681 DRM_ERROR("ib ring test failed (%d).\n", r);
2682 }
d38ceaf9
AD
2683
2684 r = amdgpu_late_init(adev);
03161a6e
HR
2685 if (r)
2686 goto unlock;
d38ceaf9 2687
756e6880
AD
2688 /* pin cursors */
2689 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2690 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2691
2692 if (amdgpu_crtc->cursor_bo) {
2693 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2694 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2695 if (r == 0) {
2696 r = amdgpu_bo_pin(aobj,
2697 AMDGPU_GEM_DOMAIN_VRAM,
2698 &amdgpu_crtc->cursor_addr);
2699 if (r != 0)
2700 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2701 amdgpu_bo_unreserve(aobj);
2702 }
2703 }
2704 }
ba997709
YZ
2705 r = amdgpu_amdkfd_resume(adev);
2706 if (r)
2707 return r;
756e6880 2708
d38ceaf9
AD
2709 /* blat the mode back in */
2710 if (fbcon) {
4562236b
HW
2711 if (!amdgpu_device_has_dc_support(adev)) {
2712 /* pre DCE11 */
2713 drm_helper_resume_force_mode(dev);
2714
2715 /* turn on display hw */
2716 drm_modeset_lock_all(dev);
2717 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2718 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2719 }
2720 drm_modeset_unlock_all(dev);
2721 } else {
2722 /*
2723 * There is no equivalent atomic helper to turn on
2724 * display, so we defined our own function for this,
2725 * once suspend resume is supported by the atomic
2726 * framework this will be reworked
2727 */
2728 amdgpu_dm_display_resume(adev);
d38ceaf9
AD
2729 }
2730 }
2731
2732 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2733
2734 /*
2735 * Most of the connector probing functions try to acquire runtime pm
2736 * refs to ensure that the GPU is powered on when connector polling is
2737 * performed. Since we're calling this from a runtime PM callback,
2738 * trying to acquire rpm refs will cause us to deadlock.
2739 *
2740 * Since we're guaranteed to be holding the rpm lock, it's safe to
2741 * temporarily disable the rpm helpers so this doesn't deadlock us.
2742 */
2743#ifdef CONFIG_PM
2744 dev->dev->power.disable_depth++;
2745#endif
4562236b
HW
2746 if (!amdgpu_device_has_dc_support(adev))
2747 drm_helper_hpd_irq_event(dev);
2748 else
2749 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2750#ifdef CONFIG_PM
2751 dev->dev->power.disable_depth--;
2752#endif
d38ceaf9 2753
03161a6e 2754 if (fbcon)
d38ceaf9 2755 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2756
2757unlock:
2758 if (fbcon)
d38ceaf9 2759 console_unlock();
d38ceaf9 2760
03161a6e 2761 return r;
d38ceaf9
AD
2762}
2763
63fbf42f
CZ
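/*
 * amdgpu_check_soft_reset - ask each IP block whether it is hung
 *
 * SR-IOV VFs always report a hang; otherwise every valid block's
 * check_soft_reset callback is polled and true is returned if any
 * block reports a hang.
 */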
2764static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2765{
2766 int i;
2767 bool asic_hang = false;
2768
f993d628
ML
2769 if (amdgpu_sriov_vf(adev))
2770 return true;
2771
63fbf42f 2772 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2773 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2774 continue;
a1255107
AD
2775 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2776 adev->ip_blocks[i].status.hang =
2777 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2778 if (adev->ip_blocks[i].status.hang) {
2779 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2780 asic_hang = true;
2781 }
2782 }
2783 return asic_hang;
2784}
2785
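/* run the pre_soft_reset callback of every hung IP block */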
4d446656 2786static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2787{
2788 int i, r = 0;
2789
2790 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2791 if (!adev->ip_blocks[i].status.valid)
d31a501e 2792 continue;
a1255107
AD
2793 if (adev->ip_blocks[i].status.hang &&
2794 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2795 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2796 if (r)
2797 return r;
2798 }
2799 }
2800
2801 return 0;
2802}
2803
35d782fe
CZ
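/* a hang in GMC, SMC, ACP, DCE or PSP cannot be recovered by a soft reset,
 * so a full ASIC reset is required */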
2804static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2805{
da146d3b
AD
2806 int i;
2807
2808 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2809 if (!adev->ip_blocks[i].status.valid)
da146d3b 2810 continue;
a1255107
AD
2811 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2812 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2813 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2814 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2815 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2816 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2817 DRM_INFO("Some block need full reset!\n");
2818 return true;
2819 }
2820 }
35d782fe
CZ
2821 }
2822 return false;
2823}
2824
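/* run the soft_reset callback of every hung IP block */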
2825static int amdgpu_soft_reset(struct amdgpu_device *adev)
2826{
2827 int i, r = 0;
2828
2829 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2830 if (!adev->ip_blocks[i].status.valid)
35d782fe 2831 continue;
a1255107
AD
2832 if (adev->ip_blocks[i].status.hang &&
2833 adev->ip_blocks[i].version->funcs->soft_reset) {
2834 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2835 if (r)
2836 return r;
2837 }
2838 }
2839
2840 return 0;
2841}
2842
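/* run the post_soft_reset callback of every hung IP block */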
2843static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2844{
2845 int i, r = 0;
2846
2847 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2848 if (!adev->ip_blocks[i].status.valid)
35d782fe 2849 continue;
a1255107
AD
2850 if (adev->ip_blocks[i].status.hang &&
2851 adev->ip_blocks[i].version->funcs->post_soft_reset)
2852 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2853 if (r)
2854 return r;
2855 }
2856
2857 return 0;
2858}
2859
3ad81f16
CZ
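/* shadow backups of VRAM BOs are only needed on dGPUs with a non-zero
 * lockup timeout */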
2860bool amdgpu_need_backup(struct amdgpu_device *adev)
2861{
2862 if (adev->flags & AMD_IS_APU)
2863 return false;
2864
2865 return amdgpu_lockup_timeout > 0 ? true : false;
2866}
2867
53cdccd5
CZ
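/*
 * amdgpu_recover_vram_from_shadow - restore a VRAM BO from its GTT shadow
 *
 * If the BO still resides in VRAM (i.e. was not evicted), validate its
 * shadow and copy the shadow contents back, returning the restore fence
 * in @fence.
 */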
2868static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2869 struct amdgpu_ring *ring,
2870 struct amdgpu_bo *bo,
f54d1867 2871 struct dma_fence **fence)
53cdccd5
CZ
2872{
2873 uint32_t domain;
2874 int r;
2875
23d2e504
RH
2876 if (!bo->shadow)
2877 return 0;
2878
1d284797 2879 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2880 if (r)
2881 return r;
2882 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2883 /* if bo has been evicted, then no need to recover */
2884 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2885 r = amdgpu_bo_validate(bo->shadow);
2886 if (r) {
2887 DRM_ERROR("bo validate failed!\n");
2888 goto err;
2889 }
2890
23d2e504 2891 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2892 NULL, fence, true);
23d2e504
RH
2893 if (r) {
2894 DRM_ERROR("recover page table failed!\n");
2895 goto err;
2896 }
2897 }
53cdccd5 2898err:
23d2e504
RH
2899 amdgpu_bo_unreserve(bo);
2900 return r;
53cdccd5
CZ
2901}
2902
5740682e
ML
2903/*
2904 * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough
a90ad3c2
ML
2905 *
2906 * @adev: amdgpu device pointer
5740682e 2907 * @reset_flags: output param tells caller the reset result
a90ad3c2 2908 *
5740682e
ML
2909 * attempt a soft reset or full reset and reinitialize the ASIC
2910 * return 0 on success, otherwise an error code
2911 */
2912static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
a90ad3c2 2913{
5740682e
ML
2914 bool need_full_reset, vram_lost = 0;
2915 int r;
a90ad3c2 2916
5740682e 2917 need_full_reset = amdgpu_need_full_reset(adev);
a90ad3c2 2918
5740682e
ML
2919 if (!need_full_reset) {
2920 amdgpu_pre_soft_reset(adev);
2921 r = amdgpu_soft_reset(adev);
2922 amdgpu_post_soft_reset(adev);
2923 if (r || amdgpu_check_soft_reset(adev)) {
2924 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2925 need_full_reset = true;
2926 }
a90ad3c2 2927
5740682e 2928 }
a90ad3c2 2929
5740682e
ML
2930 if (need_full_reset) {
2931 r = amdgpu_suspend(adev);
a90ad3c2 2932
5740682e
ML
2933retry:
2934 amdgpu_atombios_scratch_regs_save(adev);
2935 r = amdgpu_asic_reset(adev);
2936 amdgpu_atombios_scratch_regs_restore(adev);
2937 /* post card */
2938 amdgpu_atom_asic_init(adev->mode_info.atom_context);
65781c78 2939
5740682e
ML
2940 if (!r) {
2941 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2942 r = amdgpu_resume_phase1(adev);
2943 if (r)
2944 goto out;
65781c78 2945
5740682e
ML
2946 vram_lost = amdgpu_check_vram_lost(adev);
2947 if (vram_lost) {
2948 DRM_ERROR("VRAM is lost!\n");
2949 atomic_inc(&adev->vram_lost_counter);
2950 }
2951
c1c7ce8f
CK
2952 r = amdgpu_gtt_mgr_recover(
2953 &adev->mman.bdev.man[TTM_PL_TT]);
5740682e
ML
2954 if (r)
2955 goto out;
2956
2957 r = amdgpu_resume_phase2(adev);
2958 if (r)
2959 goto out;
2960
2961 if (vram_lost)
2962 amdgpu_fill_reset_magic(adev);
65781c78 2963 }
5740682e 2964 }
65781c78 2965
5740682e
ML
2966out:
2967 if (!r) {
2968 amdgpu_irq_gpu_reset_resume_helper(adev);
2969 r = amdgpu_ib_ring_tests(adev);
2970 if (r) {
2971 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2972 r = amdgpu_suspend(adev);
2973 need_full_reset = true;
2974 goto retry;
2975 }
2976 }
65781c78 2977
5740682e
ML
2978 if (reset_flags) {
2979 if (vram_lost)
2980 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
a90ad3c2 2981
5740682e
ML
2982 if (need_full_reset)
2983 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
65781c78 2984 }
a90ad3c2 2985
5740682e
ML
2986 return r;
2987}
a90ad3c2 2988
5740682e
ML
2989/*
2990 * amdgpu_reset_sriov - reset ASIC for SR-IOV vf
2991 *
2992 * @adev: amdgpu device pointer
2993 * @reset_flags: output param tells caller the reset result
2994 *
2995 * do VF FLR and reinitialize the ASIC
2996 * return 0 on success, otherwise an error code
2997 */
2998static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
2999{
3000 int r;
3001
3002 if (from_hypervisor)
3003 r = amdgpu_virt_request_full_gpu(adev, true);
3004 else
3005 r = amdgpu_virt_reset_gpu(adev);
3006 if (r)
3007 return r;
a90ad3c2
ML
3008
3009 /* Resume IP prior to SMC */
5740682e
ML
3010 r = amdgpu_sriov_reinit_early(adev);
3011 if (r)
3012 goto error;
a90ad3c2
ML
3013
3014 /* we need recover gart prior to run SMC/CP/SDMA resume */
c1c7ce8f 3015 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2
ML
3016
3017 /* now we are okay to resume SMC/CP/SDMA */
5740682e
ML
3018 r = amdgpu_sriov_reinit_late(adev);
3019 if (r)
3020 goto error;
a90ad3c2
ML
3021
3022 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e
ML
3023 r = amdgpu_ib_ring_tests(adev);
3024 if (r)
a90ad3c2
ML
3025 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
3026
5740682e 3027error:
a90ad3c2
ML
3028 /* release full control of GPU after ib test */
3029 amdgpu_virt_release_full_gpu(adev, true);
3030
5740682e
ML
3031 if (reset_flags) {
3032 /* will get vram_lost from GIM in future, now all
3033 * reset request considered VRAM LOST
3034 */
3035 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
3036 atomic_inc(&adev->vram_lost_counter);
a90ad3c2 3037
5740682e
ML
3038 /* VF FLR or hotlink reset is always full-reset */
3039 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
a90ad3c2
ML
3040 }
3041
3042 return r;
3043}
3044
d38ceaf9 3045/**
5740682e 3046 * amdgpu_gpu_recover - reset the asic and recover scheduler
d38ceaf9
AD
3047 *
3048 * @adev: amdgpu device pointer
5740682e 3049 * @job: which job trigger hang
d38ceaf9 3050 *
5740682e 3051 * Attempt to reset the GPU if it has hung (all asics).
d38ceaf9
AD
3052 * Returns 0 for success or an error on failure.
3053 */
5740682e 3054int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
d38ceaf9 3055{
4562236b 3056 struct drm_atomic_state *state = NULL;
5740682e
ML
3057 uint64_t reset_flags = 0;
3058 int i, r, resched;
fb140b29 3059
63fbf42f
CZ
3060 if (!amdgpu_check_soft_reset(adev)) {
3061 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3062 return 0;
3063 }
d38ceaf9 3064
5740682e
ML
3065 dev_info(adev->dev, "GPU reset begin!\n");
3066
13a752e3 3067 mutex_lock(&adev->lock_reset);
d94aed5a 3068 atomic_inc(&adev->gpu_reset_counter);
13a752e3 3069 adev->in_gpu_reset = 1;
d38ceaf9 3070
a3c47d6b
CZ
3071 /* block TTM */
3072 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
3073 /* store modesetting */
3074 if (amdgpu_device_has_dc_support(adev))
3075 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 3076
0875dc9e
CZ
3077 /* block scheduler */
3078 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3079 struct amdgpu_ring *ring = adev->rings[i];
3080
51687759 3081 if (!ring || !ring->sched.thread)
0875dc9e 3082 continue;
5740682e
ML
3083
3084 /* only focus on the ring that hit the timeout if job is not NULL */
3085 if (job && job->ring->idx != i)
3086 continue;
3087
0875dc9e 3088 kthread_park(ring->sched.thread);
5740682e
ML
3089 amd_sched_hw_job_reset(&ring->sched, &job->base);
3090
2f9d4084
ML
3091 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3092 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3093 }
d38ceaf9 3094
5740682e
ML
3095 if (amdgpu_sriov_vf(adev))
3096 r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true);
3097 else
3098 r = amdgpu_reset(adev, &reset_flags);
35d782fe 3099
d38ceaf9 3100 if (!r) {
5740682e
ML
3101 if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
3102 (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
53cdccd5
CZ
3103 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3104 struct amdgpu_bo *bo, *tmp;
f54d1867 3105 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
3106
3107 DRM_INFO("recover vram bo from shadow\n");
3108 mutex_lock(&adev->shadow_list_lock);
3109 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 3110 next = NULL;
53cdccd5
CZ
3111 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3112 if (fence) {
f54d1867 3113 r = dma_fence_wait(fence, false);
53cdccd5 3114 if (r) {
1d7b17b0 3115 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
3116 break;
3117 }
3118 }
1f465087 3119
f54d1867 3120 dma_fence_put(fence);
53cdccd5
CZ
3121 fence = next;
3122 }
3123 mutex_unlock(&adev->shadow_list_lock);
3124 if (fence) {
f54d1867 3125 r = dma_fence_wait(fence, false);
53cdccd5 3126 if (r)
1d7b17b0 3127 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 3128 }
f54d1867 3129 dma_fence_put(fence);
53cdccd5 3130 }
5740682e 3131
d38ceaf9
AD
3132 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3133 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
3134
3135 if (!ring || !ring->sched.thread)
d38ceaf9 3136 continue;
53cdccd5 3137
5740682e
ML
3138 /* only focus on the ring that hit the timeout if job is not NULL */
3139 if (job && job->ring->idx != i)
3140 continue;
3141
aa1c8900 3142 amd_sched_job_recovery(&ring->sched);
0875dc9e 3143 kthread_unpark(ring->sched.thread);
d38ceaf9 3144 }
d38ceaf9 3145 } else {
d38ceaf9 3146 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5740682e
ML
3147 struct amdgpu_ring *ring = adev->rings[i];
3148
3149 if (!ring || !ring->sched.thread)
3150 continue;
3151
3152 /* only focus on the ring that hit the timeout if job is not NULL */
3153 if (job && job->ring->idx != i)
3154 continue;
3155
3156 kthread_unpark(adev->rings[i]->sched.thread);
d38ceaf9
AD
3157 }
3158 }
3159
4562236b 3160 if (amdgpu_device_has_dc_support(adev)) {
5740682e
ML
3161 if (drm_atomic_helper_resume(adev->ddev, state))
3162 dev_info(adev->dev, "drm resume failed:%d\n", r);
4562236b 3163 amdgpu_dm_display_resume(adev);
5740682e 3164 } else {
4562236b 3165 drm_helper_resume_force_mode(adev->ddev);
5740682e 3166 }
d38ceaf9
AD
3167
3168 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
5740682e 3169
89041940 3170 if (r) {
d38ceaf9 3171 /* bad news, how to tell it to userspace ? */
5740682e
ML
3172 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3173 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3174 } else {
3175 dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
89041940 3176 }
d38ceaf9 3177
89041940 3178 amdgpu_vf_error_trans_all(adev);
13a752e3
ML
3179 adev->in_gpu_reset = 0;
3180 mutex_unlock(&adev->lock_reset);
d38ceaf9
AD
3181 return r;
3182}
3183
d0dd7f0c
AD
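/* determine the supported PCIe gen and lane-width masks: use the module
 * parameters if set, fall back to defaults on a root bus (APUs), otherwise
 * query the DRM PCIe helpers for speed and link-width capabilities */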
3184void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3185{
3186 u32 mask;
3187 int ret;
3188
cd474ba0
AD
3189 if (amdgpu_pcie_gen_cap)
3190 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3191
cd474ba0
AD
3192 if (amdgpu_pcie_lane_cap)
3193 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3194
cd474ba0
AD
3195 /* covers APUs as well */
3196 if (pci_is_root_bus(adev->pdev->bus)) {
3197 if (adev->pm.pcie_gen_mask == 0)
3198 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3199 if (adev->pm.pcie_mlw_mask == 0)
3200 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3201 return;
cd474ba0 3202 }
d0dd7f0c 3203
cd474ba0
AD
3204 if (adev->pm.pcie_gen_mask == 0) {
3205 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3206 if (!ret) {
3207 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3208 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3209 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3210
3211 if (mask & DRM_PCIE_SPEED_25)
3212 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3213 if (mask & DRM_PCIE_SPEED_50)
3214 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3215 if (mask & DRM_PCIE_SPEED_80)
3216 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3217 } else {
3218 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3219 }
3220 }
3221 if (adev->pm.pcie_mlw_mask == 0) {
3222 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3223 if (!ret) {
3224 switch (mask) {
3225 case 32:
3226 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3227 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3228 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3229 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3230 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3231 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3232 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3233 break;
3234 case 16:
3235 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3236 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3237 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3238 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3239 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3240 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3241 break;
3242 case 12:
3243 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3244 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3245 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3246 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3247 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3248 break;
3249 case 8:
3250 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3251 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3252 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3253 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3254 break;
3255 case 4:
3256 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3257 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3258 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3259 break;
3260 case 2:
3261 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3262 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3263 break;
3264 case 1:
3265 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3266 break;
3267 default:
3268 break;
3269 }
3270 } else {
3271 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3272 }
3273 }
3274}
d38ceaf9
AD
3275
3276/*
3277 * Debugfs
3278 */
3279int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 3280 const struct drm_info_list *files,
d38ceaf9
AD
3281 unsigned nfiles)
3282{
3283 unsigned i;
3284
3285 for (i = 0; i < adev->debugfs_count; i++) {
3286 if (adev->debugfs[i].files == files) {
3287 /* Already registered */
3288 return 0;
3289 }
3290 }
3291
3292 i = adev->debugfs_count + 1;
3293 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3294 DRM_ERROR("Reached maximum number of debugfs components.\n");
3295 DRM_ERROR("Report so we increase "
3296 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3297 return -EINVAL;
3298 }
3299 adev->debugfs[adev->debugfs_count].files = files;
3300 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3301 adev->debugfs_count = i;
3302#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
3303 drm_debugfs_create_files(files, nfiles,
3304 adev->ddev->primary->debugfs_root,
3305 adev->ddev->primary);
3306#endif
3307 return 0;
3308}
3309
d38ceaf9
AD
3310#if defined(CONFIG_DEBUG_FS)
3311
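/*
 * MMIO register read via debugfs. The file offset encodes more than the
 * register address: bit 62 selects explicit SE/SH/instance banking (bits
 * 24-33, 34-43 and 44-53, with 0x3FF meaning broadcast), bit 23 requests
 * the PM mutex, and the low bits hold the register byte offset.
 */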
3312static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3313 size_t size, loff_t *pos)
3314{
45063097 3315 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3316 ssize_t result = 0;
3317 int r;
bd12267d 3318 bool pm_pg_lock, use_bank;
56628159 3319 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3320
3321 if (size & 0x3 || *pos & 0x3)
3322 return -EINVAL;
3323
bd12267d
TSD
3324 /* are we reading registers for which a PG lock is necessary? */
3325 pm_pg_lock = (*pos >> 23) & 1;
3326
56628159 3327 if (*pos & (1ULL << 62)) {
0b968650
TSD
3328 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3329 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3330 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
32977f93
TSD
3331
3332 if (se_bank == 0x3FF)
3333 se_bank = 0xFFFFFFFF;
3334 if (sh_bank == 0x3FF)
3335 sh_bank = 0xFFFFFFFF;
3336 if (instance_bank == 0x3FF)
3337 instance_bank = 0xFFFFFFFF;
56628159 3338 use_bank = 1;
56628159
TSD
3339 } else {
3340 use_bank = 0;
3341 }
3342
801a6aa9 3343 *pos &= (1UL << 22) - 1;
bd12267d 3344
56628159 3345 if (use_bank) {
32977f93
TSD
3346 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3347 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
3348 return -EINVAL;
3349 mutex_lock(&adev->grbm_idx_mutex);
3350 amdgpu_gfx_select_se_sh(adev, se_bank,
3351 sh_bank, instance_bank);
3352 }
3353
bd12267d
TSD
3354 if (pm_pg_lock)
3355 mutex_lock(&adev->pm.mutex);
3356
d38ceaf9
AD
3357 while (size) {
3358 uint32_t value;
3359
3360 if (*pos > adev->rmmio_size)
56628159 3361 goto end;
d38ceaf9
AD
3362
3363 value = RREG32(*pos >> 2);
3364 r = put_user(value, (uint32_t *)buf);
56628159
TSD
3365 if (r) {
3366 result = r;
3367 goto end;
3368 }
d38ceaf9
AD
3369
3370 result += 4;
3371 buf += 4;
3372 *pos += 4;
3373 size -= 4;
3374 }
3375
56628159
TSD
3376end:
3377 if (use_bank) {
3378 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3379 mutex_unlock(&adev->grbm_idx_mutex);
3380 }
3381
bd12267d
TSD
3382 if (pm_pg_lock)
3383 mutex_unlock(&adev->pm.mutex);
3384
d38ceaf9
AD
3385 return result;
3386}
3387
3388static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3389 size_t size, loff_t *pos)
3390{
45063097 3391 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3392 ssize_t result = 0;
3393 int r;
394fdde2
TSD
3394 bool pm_pg_lock, use_bank;
3395 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3396
3397 if (size & 0x3 || *pos & 0x3)
3398 return -EINVAL;
3399
394fdde2
TSD
3400 /* are we writing registers for which a PG lock is necessary? */
3401 pm_pg_lock = (*pos >> 23) & 1;
3402
3403 if (*pos & (1ULL << 62)) {
0b968650
TSD
3404 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3405 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3406 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
394fdde2
TSD
3407
3408 if (se_bank == 0x3FF)
3409 se_bank = 0xFFFFFFFF;
3410 if (sh_bank == 0x3FF)
3411 sh_bank = 0xFFFFFFFF;
3412 if (instance_bank == 0x3FF)
3413 instance_bank = 0xFFFFFFFF;
3414 use_bank = 1;
3415 } else {
3416 use_bank = 0;
3417 }
3418
801a6aa9 3419 *pos &= (1UL << 22) - 1;
394fdde2
TSD
3420
3421 if (use_bank) {
3422 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3423 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3424 return -EINVAL;
3425 mutex_lock(&adev->grbm_idx_mutex);
3426 amdgpu_gfx_select_se_sh(adev, se_bank,
3427 sh_bank, instance_bank);
3428 }
3429
3430 if (pm_pg_lock)
3431 mutex_lock(&adev->pm.mutex);
3432
d38ceaf9
AD
3433 while (size) {
3434 uint32_t value;
3435
3436 if (*pos > adev->rmmio_size)
3437 return result;
3438
3439 r = get_user(value, (uint32_t *)buf);
3440 if (r)
3441 return r;
3442
3443 WREG32(*pos >> 2, value);
3444
3445 result += 4;
3446 buf += 4;
3447 *pos += 4;
3448 size -= 4;
3449 }
3450
394fdde2
TSD
3451 if (use_bank) {
3452 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3453 mutex_unlock(&adev->grbm_idx_mutex);
3454 }
3455
3456 if (pm_pg_lock)
3457 mutex_unlock(&adev->pm.mutex);
3458
d38ceaf9
AD
3459 return result;
3460}
3461
adcec288
TSD
3462static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3463 size_t size, loff_t *pos)
3464{
45063097 3465 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3466 ssize_t result = 0;
3467 int r;
3468
3469 if (size & 0x3 || *pos & 0x3)
3470 return -EINVAL;
3471
3472 while (size) {
3473 uint32_t value;
3474
3475 value = RREG32_PCIE(*pos >> 2);
3476 r = put_user(value, (uint32_t *)buf);
3477 if (r)
3478 return r;
3479
3480 result += 4;
3481 buf += 4;
3482 *pos += 4;
3483 size -= 4;
3484 }
3485
3486 return result;
3487}
3488
3489static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3490 size_t size, loff_t *pos)
3491{
45063097 3492 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3493 ssize_t result = 0;
3494 int r;
3495
3496 if (size & 0x3 || *pos & 0x3)
3497 return -EINVAL;
3498
3499 while (size) {
3500 uint32_t value;
3501
3502 r = get_user(value, (uint32_t *)buf);
3503 if (r)
3504 return r;
3505
3506 WREG32_PCIE(*pos >> 2, value);
3507
3508 result += 4;
3509 buf += 4;
3510 *pos += 4;
3511 size -= 4;
3512 }
3513
3514 return result;
3515}
3516
3517static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3518 size_t size, loff_t *pos)
3519{
45063097 3520 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3521 ssize_t result = 0;
3522 int r;
3523
3524 if (size & 0x3 || *pos & 0x3)
3525 return -EINVAL;
3526
3527 while (size) {
3528 uint32_t value;
3529
3530 value = RREG32_DIDT(*pos >> 2);
3531 r = put_user(value, (uint32_t *)buf);
3532 if (r)
3533 return r;
3534
3535 result += 4;
3536 buf += 4;
3537 *pos += 4;
3538 size -= 4;
3539 }
3540
3541 return result;
3542}
3543
3544static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3545 size_t size, loff_t *pos)
3546{
45063097 3547 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3548 ssize_t result = 0;
3549 int r;
3550
3551 if (size & 0x3 || *pos & 0x3)
3552 return -EINVAL;
3553
3554 while (size) {
3555 uint32_t value;
3556
3557 r = get_user(value, (uint32_t *)buf);
3558 if (r)
3559 return r;
3560
3561 WREG32_DIDT(*pos >> 2, value);
3562
3563 result += 4;
3564 buf += 4;
3565 *pos += 4;
3566 size -= 4;
3567 }
3568
3569 return result;
3570}
3571
3572static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3573 size_t size, loff_t *pos)
3574{
45063097 3575 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3576 ssize_t result = 0;
3577 int r;
3578
3579 if (size & 0x3 || *pos & 0x3)
3580 return -EINVAL;
3581
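/*
 * Unlike the plain MMIO node above, SMC registers are addressed by byte
 * offset, so *pos is handed to RREG32_SMC()/WREG32_SMC() untranslated
 * (no ">> 2").
 */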
3582 while (size) {
3583 uint32_t value;
3584
6fc0deaf 3585 value = RREG32_SMC(*pos);
adcec288
TSD
3586 r = put_user(value, (uint32_t *)buf);
3587 if (r)
3588 return r;
3589
3590 result += 4;
3591 buf += 4;
3592 *pos += 4;
3593 size -= 4;
3594 }
3595
3596 return result;
3597}
3598
3599static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3600 size_t size, loff_t *pos)
3601{
45063097 3602 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3603 ssize_t result = 0;
3604 int r;
3605
3606 if (size & 0x3 || *pos & 0x3)
3607 return -EINVAL;
3608
3609 while (size) {
3610 uint32_t value;
3611
3612 r = get_user(value, (uint32_t *)buf);
3613 if (r)
3614 return r;
3615
6fc0deaf 3616 WREG32_SMC(*pos, value);
adcec288
TSD
3617
3618 result += 4;
3619 buf += 4;
3620 *pos += 4;
3621 size -= 4;
3622 }
3623
3624 return result;
3625}
3626
1e051413
TSD
3627static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3628 size_t size, loff_t *pos)
3629{
45063097 3630 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3631 ssize_t result = 0;
3632 int r;
3633 uint32_t *config, no_regs = 0;
3634
3635 if (size & 0x3 || *pos & 0x3)
3636 return -EINVAL;
3637
ecab7668 3638 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
1e051413
TSD
3639 if (!config)
3640 return -ENOMEM;
3641
3642 /* version, increment each time something is added */
9a999359 3643 config[no_regs++] = 3;
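/*
 * Layout note: fields are only ever appended (see the rev==N markers
 * below), so a reader that understands layout version N can consume a
 * buffer written by any newer version up to the fields it knows about.
 */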
1e051413
TSD
3644 config[no_regs++] = adev->gfx.config.max_shader_engines;
3645 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3646 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3647 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3648 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3649 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3650 config[no_regs++] = adev->gfx.config.max_gprs;
3651 config[no_regs++] = adev->gfx.config.max_gs_threads;
3652 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3653 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3654 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3655 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3656 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3657 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3658 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3659 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3660 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3661 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3662 config[no_regs++] = adev->gfx.config.num_gpus;
3663 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3664 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3665 config[no_regs++] = adev->gfx.config.gb_addr_config;
3666 config[no_regs++] = adev->gfx.config.num_rbs;
3667
89a8f309
TSD
3668 /* rev==1 */
3669 config[no_regs++] = adev->rev_id;
3670 config[no_regs++] = adev->pg_flags;
3671 config[no_regs++] = adev->cg_flags;
3672
e9f11dc8
TSD
3673 /* rev==2 */
3674 config[no_regs++] = adev->family;
3675 config[no_regs++] = adev->external_rev_id;
3676
9a999359
TSD
3677 /* rev==3 */
3678 config[no_regs++] = adev->pdev->device;
3679 config[no_regs++] = adev->pdev->revision;
3680 config[no_regs++] = adev->pdev->subsystem_device;
3681 config[no_regs++] = adev->pdev->subsystem_vendor;
3682
1e051413
TSD
3683 while (size && (*pos < no_regs * 4)) {
3684 uint32_t value;
3685
3686 value = config[*pos >> 2];
3687 r = put_user(value, (uint32_t *)buf);
3688 if (r) {
3689 kfree(config);
3690 return r;
3691 }
3692
3693 result += 4;
3694 buf += 4;
3695 *pos += 4;
3696 size -= 4;
3697 }
3698
3699 kfree(config);
3700 return result;
3701}
3702
f2cdaf20
TSD
3703static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3704 size_t size, loff_t *pos)
3705{
45063097 3706 struct amdgpu_device *adev = file_inode(f)->i_private;
9f8df7d7
TSD
3707 int idx, x, outsize, r, valuesize;
3708 uint32_t values[16];
f2cdaf20 3709
9f8df7d7 3710 if (size & 3 || *pos & 0x3)
f2cdaf20
TSD
3711 return -EINVAL;
3712
3cbc614f
SP
3713 if (amdgpu_dpm == 0)
3714 return -EINVAL;
3715
f2cdaf20
TSD
3716 /* convert offset to sensor number */
3717 idx = *pos >> 2;
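/*
 * Each sensor occupies one 32-bit slot, so reading 4 bytes at file offset
 * idx * 4 queries sensor number idx from the powerplay read_sensor()
 * backend; some sensors may report more than one value.
 */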
3718
9f8df7d7 3719 valuesize = sizeof(values);
f2cdaf20 3720 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
cd4d7464 3721 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
f2cdaf20
TSD
3722 else
3723 return -EINVAL;
3724
9f8df7d7
TSD
3725 if (size > valuesize)
3726 return -EINVAL;
3727
3728 outsize = 0;
3729 x = 0;
3730 if (!r) {
3731 while (size) {
3732 r = put_user(values[x++], (int32_t *)buf);
3733 buf += 4;
3734 size -= 4;
3735 outsize += 4;
3736 }
3737 }
f2cdaf20 3738
9f8df7d7 3739 return !r ? outsize : r;
f2cdaf20 3740}
1e051413 3741
273d7aa1
TSD
3742static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3743 size_t size, loff_t *pos)
3744{
3745 struct amdgpu_device *adev = file_inode(f)->i_private;
3746 int r, x;
3747 ssize_t result = 0;
472259f0 3748 uint32_t offset, se, sh, cu, wave, simd, data[32];
273d7aa1
TSD
3749
3750 if (size & 3 || *pos & 3)
3751 return -EINVAL;
3752
3753 /* decode offset */
0b968650
TSD
3754 offset = (*pos & GENMASK_ULL(6, 0));
3755 se = (*pos & GENMASK_ULL(14, 7)) >> 7;
3756 sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
3757 cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
3758 wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
3759 simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
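/*
 * Per the masks above, the wave to dump is fully described by the file
 * offset: bits 0..6 byte offset into the returned wave data, 7..14 SE,
 * 15..22 SH, 23..30 CU, 31..36 wave, 37..44 SIMD.
 */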
273d7aa1
TSD
3760
3761 /* switch to the specific se/sh/cu */
3762 mutex_lock(&adev->grbm_idx_mutex);
3763 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3764
3765 x = 0;
472259f0
TSD
3766 if (adev->gfx.funcs->read_wave_data)
3767 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
273d7aa1
TSD
3768
3769 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3770 mutex_unlock(&adev->grbm_idx_mutex);
3771
5ecfb3b8
TSD
3772 if (!x)
3773 return -EINVAL;
3774
472259f0 3775 while (size && (offset < x * 4)) {
273d7aa1
TSD
3776 uint32_t value;
3777
472259f0 3778 value = data[offset >> 2];
273d7aa1
TSD
3779 r = put_user(value, (uint32_t *)buf);
3780 if (r)
3781 return r;
3782
3783 result += 4;
3784 buf += 4;
472259f0 3785 offset += 4;
273d7aa1
TSD
3786 size -= 4;
3787 }
3788
3789 return result;
3790}
3791
c5a60ce8
TSD
3792static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3793 size_t size, loff_t *pos)
3794{
3795 struct amdgpu_device *adev = file_inode(f)->i_private;
3796 int r;
3797 ssize_t result = 0;
3798 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3799
/* cap requests at the 1024-dword scratch buffer allocated below */
3800 if (size > 4096 || size & 3 || *pos & 3)
3801 return -EINVAL;
3802
3803 /* decode offset */
0b968650
TSD
3804 offset = (*pos & GENMASK_ULL(11, 0)) >> 2; /* dword index of the first GPR */
3805 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
3806 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
3807 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
3808 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
3809 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
3810 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
3811 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
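/*
 * Offset encoding for this node: bits 0..11 select the first GPR (byte
 * offset, turned into a dword index above), 12..19 SE, 20..27 SH,
 * 28..35 CU, 36..43 wave, 44..51 SIMD, 52..59 thread, 60..61 bank
 * (0 reads VGPRs, anything else SGPRs; the thread index only matters
 * for VGPRs).
 */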
c5a60ce8
TSD
3812
3813 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3814 if (!data)
3815 return -ENOMEM;
3816
3817 /* switch to the specific se/sh/cu */
3818 mutex_lock(&adev->grbm_idx_mutex);
3819 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3820
3821 if (bank == 0) {
3822 if (adev->gfx.funcs->read_wave_vgprs)
3823 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3824 } else {
3825 if (adev->gfx.funcs->read_wave_sgprs)
3826 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3827 }
3828
3829 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3830 mutex_unlock(&adev->grbm_idx_mutex);
3831
3832 while (size) {
3833 uint32_t value;
3834
3835 value = data[result >> 2]; /* data[] is filled from index 0 */
3836 r = put_user(value, (uint32_t *)buf);
3837 if (r) {
3838 result = r;
3839 goto err;
3840 }
3841
3842 result += 4;
3843 buf += 4;
3844 size -= 4;
3845 }
3846
3847err:
3848 kfree(data);
3849 return result;
3850}
3851
d38ceaf9
AD
3852static const struct file_operations amdgpu_debugfs_regs_fops = {
3853 .owner = THIS_MODULE,
3854 .read = amdgpu_debugfs_regs_read,
3855 .write = amdgpu_debugfs_regs_write,
3856 .llseek = default_llseek
3857};
adcec288
TSD
3858static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3859 .owner = THIS_MODULE,
3860 .read = amdgpu_debugfs_regs_didt_read,
3861 .write = amdgpu_debugfs_regs_didt_write,
3862 .llseek = default_llseek
3863};
3864static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3865 .owner = THIS_MODULE,
3866 .read = amdgpu_debugfs_regs_pcie_read,
3867 .write = amdgpu_debugfs_regs_pcie_write,
3868 .llseek = default_llseek
3869};
3870static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3871 .owner = THIS_MODULE,
3872 .read = amdgpu_debugfs_regs_smc_read,
3873 .write = amdgpu_debugfs_regs_smc_write,
3874 .llseek = default_llseek
3875};
3876
1e051413
TSD
3877static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3878 .owner = THIS_MODULE,
3879 .read = amdgpu_debugfs_gca_config_read,
3880 .llseek = default_llseek
3881};
3882
f2cdaf20
TSD
3883static const struct file_operations amdgpu_debugfs_sensors_fops = {
3884 .owner = THIS_MODULE,
3885 .read = amdgpu_debugfs_sensor_read,
3886 .llseek = default_llseek
3887};
3888
273d7aa1
TSD
3889static const struct file_operations amdgpu_debugfs_wave_fops = {
3890 .owner = THIS_MODULE,
3891 .read = amdgpu_debugfs_wave_read,
3892 .llseek = default_llseek
3893};
c5a60ce8
TSD
3894static const struct file_operations amdgpu_debugfs_gpr_fops = {
3895 .owner = THIS_MODULE,
3896 .read = amdgpu_debugfs_gpr_read,
3897 .llseek = default_llseek
3898};
273d7aa1 3899
adcec288
TSD
3900static const struct file_operations *debugfs_regs[] = {
3901 &amdgpu_debugfs_regs_fops,
3902 &amdgpu_debugfs_regs_didt_fops,
3903 &amdgpu_debugfs_regs_pcie_fops,
3904 &amdgpu_debugfs_regs_smc_fops,
1e051413 3905 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3906 &amdgpu_debugfs_sensors_fops,
273d7aa1 3907 &amdgpu_debugfs_wave_fops,
c5a60ce8 3908 &amdgpu_debugfs_gpr_fops,
adcec288
TSD
3909};
3910
3911static const char *debugfs_regs_names[] = {
3912 "amdgpu_regs",
3913 "amdgpu_regs_didt",
3914 "amdgpu_regs_pcie",
3915 "amdgpu_regs_smc",
1e051413 3916 "amdgpu_gca_config",
f2cdaf20 3917 "amdgpu_sensors",
273d7aa1 3918 "amdgpu_wave",
c5a60ce8 3919 "amdgpu_gpr",
adcec288 3920};
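/*
 * debugfs_regs[] and debugfs_regs_names[] are paired by index in
 * amdgpu_debugfs_regs_init() below, so new entries must be added to both
 * arrays at the same position.
 */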
d38ceaf9
AD
3921
3922static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3923{
3924 struct drm_minor *minor = adev->ddev->primary;
3925 struct dentry *ent, *root = minor->debugfs_root;
adcec288
TSD
3926 unsigned i, j;
3927
3928 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3929 ent = debugfs_create_file(debugfs_regs_names[i],
3930 S_IFREG | S_IRUGO, root,
3931 adev, debugfs_regs[i]);
3932 if (IS_ERR(ent)) {
3933 for (j = 0; j < i; j++) {
3934 debugfs_remove(adev->debugfs_regs[j]);
3935 adev->debugfs_regs[j] = NULL;
3936 }
3937 return PTR_ERR(ent);
3938 }
d38ceaf9 3939
adcec288
TSD
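/* the first node (amdgpu_regs) advertises the size of the MMIO aperture */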
3940 if (!i)
3941 i_size_write(ent->d_inode, adev->rmmio_size);
3942 adev->debugfs_regs[i] = ent;
3943 }
d38ceaf9
AD
3944
3945 return 0;
3946}
3947
3948static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3949{
adcec288
TSD
3950 unsigned i;
3951
3952 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3953 if (adev->debugfs_regs[i]) {
3954 debugfs_remove(adev->debugfs_regs[i]);
3955 adev->debugfs_regs[i] = NULL;
3956 }
3957 }
d38ceaf9
AD
3958}
3959
4f0955fc
HR
3960static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3961{
3962 struct drm_info_node *node = (struct drm_info_node *) m->private;
3963 struct drm_device *dev = node->minor->dev;
3964 struct amdgpu_device *adev = dev->dev_private;
3965 int r = 0, i;
3966
3967 /* hold on the scheduler */
3968 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3969 struct amdgpu_ring *ring = adev->rings[i];
3970
3971 if (!ring || !ring->sched.thread)
3972 continue;
3973 kthread_park(ring->sched.thread);
3974 }
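/*
 * With every ring's scheduler kthread parked above, the scheduler cannot
 * push new jobs while the IB tests below drive the rings directly; the
 * threads are unparked again once the tests are done.
 */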
3975
3976 seq_printf(m, "run ib test:\n");
3977 r = amdgpu_ib_ring_tests(adev);
3978 if (r)
3979 seq_printf(m, "ib ring tests failed (%d).\n", r);
3980 else
3981 seq_printf(m, "ib ring tests passed.\n");
3982
3983 /* go on the scheduler */
3984 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3985 struct amdgpu_ring *ring = adev->rings[i];
3986
3987 if (!ring || !ring->sched.thread)
3988 continue;
3989 kthread_unpark(ring->sched.thread);
3990 }
3991
3992 return 0;
3993}
3994
3995static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3996 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3997};
3998
3999static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
4000{
4001 return amdgpu_debugfs_add_files(adev,
4002 amdgpu_debugfs_test_ib_ring_list, 1);
4003}
4004
d38ceaf9
AD
4005int amdgpu_debugfs_init(struct drm_minor *minor)
4006{
4007 return 0;
4008}
db95e218
KR
4009
4010static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
4011{
4012 struct drm_info_node *node = (struct drm_info_node *) m->private;
4013 struct drm_device *dev = node->minor->dev;
4014 struct amdgpu_device *adev = dev->dev_private;
4015
4016 seq_write(m, adev->bios, adev->bios_size);
4017 return 0;
4018}
4019
db95e218
KR
4020static const struct drm_info_list amdgpu_vbios_dump_list[] = {
4021 {"amdgpu_vbios",
4022 amdgpu_debugfs_get_vbios_dump,
4023 0, NULL},
4024};
4025
db95e218
KR
4026static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
4027{
4028 return amdgpu_debugfs_add_files(adev,
4029 amdgpu_vbios_dump_list, 1);
4030}
7cebc728 4031#else
27bad5b9 4032static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
4f0955fc
HR
4033{
4034 return 0;
4035}
7cebc728
AK
4036static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
4037{
4038 return 0;
4039}
db95e218
KR
4040static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
4041{
4042 return 0;
4043}
7cebc728 4044static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 4045#endif