]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/gpu/drm/radeon/radeon_device.c
Merge remote-tracking branch 'regulator/fix/max77802' into regulator-linus
[mirror_ubuntu-artful-kernel.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
b8751946 33#include <linux/pm_runtime.h>
28d52043 34#include <linux/vgaarb.h>
6a9ee8af 35#include <linux/vga_switcheroo.h>
bcc65fd8 36#include <linux/efi.h>
771fe6b9
JG
37#include "radeon_reg.h"
38#include "radeon.h"
771fe6b9
JG
39#include "atom.h"
40
1b5331d9
JG
/* Human-readable names for each radeon chip family, indexed by
 * enum radeon_family.  Used for informational log output only;
 * "LAST" is the end-of-table sentinel. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
106
066f1f0b
AD
107#if defined(CONFIG_VGA_SWITCHEROO)
108bool radeon_has_atpx_dgpu_power_cntl(void);
109bool radeon_is_atpx_hybrid(void);
110#else
111static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
112static inline bool radeon_is_atpx_hybrid(void) { return false; }
113#endif
114
4807c5a8
AD
/* PX (PowerXpress) quirk flags */
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)	/* PX is broken on this board; run dGPU-only */
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)	/* dGPU needs extra time to wake up */

/* One entry per known-broken board, matched on the full PCI
 * vendor/device/subsystem tuple. */
struct radeon_px_quirk {
	u32 chip_vendor;	/* PCI vendor id of the GPU */
	u32 chip_device;	/* PCI device id of the GPU */
	u32 subsys_vendor;	/* PCI subsystem vendor id (board/OEM) */
	u32 subsys_device;	/* PCI subsystem device id (board/OEM) */
	u32 px_quirk_flags;	/* RADEON_PX_QUIRK_* flags to apply */
};

/* Table of boards needing PX quirks; terminated by an all-zero entry. */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* macbook pro 8.2 */
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	{ 0, 0, 0, 0, 0 },
};
147
90c4cde9
AD
148bool radeon_is_px(struct drm_device *dev)
149{
150 struct radeon_device *rdev = dev->dev_private;
151
152 if (rdev->flags & RADEON_IS_PX)
153 return true;
154 return false;
155}
10ebc0bc 156
4807c5a8
AD
157static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
158{
159 struct radeon_px_quirk *p = radeon_px_quirk_list;
160
161 /* Apply PX quirks */
162 while (p && p->chip_device != 0) {
163 if (rdev->pdev->vendor == p->chip_vendor &&
164 rdev->pdev->device == p->chip_device &&
165 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
166 rdev->pdev->subsystem_device == p->subsys_device) {
167 rdev->px_quirk_flags = p->px_quirk_flags;
168 break;
169 }
170 ++p;
171 }
172
173 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
174 rdev->flags &= ~RADEON_IS_PX;
066f1f0b
AD
175
176 /* disable PX is the system doesn't support dGPU power control or hybrid gfx */
177 if (!radeon_is_atpx_hybrid() &&
178 !radeon_has_atpx_dgpu_power_cntl())
179 rdev->flags &= ~RADEON_IS_PX;
4807c5a8
AD
180}
181
2e1b65f9
AD
182/**
183 * radeon_program_register_sequence - program an array of registers.
184 *
185 * @rdev: radeon_device pointer
186 * @registers: pointer to the register array
187 * @array_size: size of the register array
188 *
189 * Programs an array or registers with and and or masks.
190 * This is a helper for setting golden registers.
191 */
192void radeon_program_register_sequence(struct radeon_device *rdev,
193 const u32 *registers,
194 const u32 array_size)
195{
196 u32 tmp, reg, and_mask, or_mask;
197 int i;
198
199 if (array_size % 3)
200 return;
201
202 for (i = 0; i < array_size; i +=3) {
203 reg = registers[i + 0];
204 and_mask = registers[i + 1];
205 or_mask = registers[i + 2];
206
207 if (and_mask == 0xffffffff) {
208 tmp = or_mask;
209 } else {
210 tmp = RREG32(reg);
211 tmp &= ~and_mask;
212 tmp |= or_mask;
213 }
214 WREG32(reg, tmp);
215 }
216}
217
1a0041b8
AD
/**
 * radeon_pci_config_reset - reset the asic via the PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Writes the asic reset magic value to PCI config register 0x7c.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
222
0c195119
AD
/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		/* re-program the surface register of every surface backed by
		 * a BO, and clear the rest */
		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
246
771fe6b9
JG
/*
 * GPU scratch registers helper functions.
 */
0c195119
AD
250/**
251 * radeon_scratch_init - Init scratch register driver information.
252 *
253 * @rdev: radeon_device pointer
254 *
255 * Init CP scratch register driver information (r1xx-r5xx)
256 */
3ce0a23d 257void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
258{
259 int i;
260
261 /* FIXME: check this out */
262 if (rdev->family < CHIP_R300) {
263 rdev->scratch.num_reg = 5;
264 } else {
265 rdev->scratch.num_reg = 7;
266 }
724c80e1 267 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
268 for (i = 0; i < rdev->scratch.num_reg; i++) {
269 rdev->scratch.free[i] = true;
724c80e1 270 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
271 }
272}
273
0c195119
AD
274/**
275 * radeon_scratch_get - Allocate a scratch register
276 *
277 * @rdev: radeon_device pointer
278 * @reg: scratch register mmio offset
279 *
280 * Allocate a CP scratch register for use by the driver (all asics).
281 * Returns 0 on success or -EINVAL on failure.
282 */
771fe6b9
JG
283int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
284{
285 int i;
286
287 for (i = 0; i < rdev->scratch.num_reg; i++) {
288 if (rdev->scratch.free[i]) {
289 rdev->scratch.free[i] = false;
290 *reg = rdev->scratch.reg[i];
291 return 0;
292 }
293 }
294 return -EINVAL;
295}
296
0c195119
AD
297/**
298 * radeon_scratch_free - Free a scratch register
299 *
300 * @rdev: radeon_device pointer
301 * @reg: scratch register mmio offset
302 *
303 * Free a CP scratch register allocated for use by the driver (all asics)
304 */
771fe6b9
JG
305void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
306{
307 int i;
308
309 for (i = 0; i < rdev->scratch.num_reg; i++) {
310 if (rdev->scratch.reg[i] == reg) {
311 rdev->scratch.free[i] = true;
312 return;
313 }
314 }
315}
316
75efdee1
AD
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping (PCI BAR 2) */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* cap the doorbell count at what the BAR can actually hold */
	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* only map the portion of the BAR the driver will actually use */
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* all doorbells start out unallocated */
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}
349
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	/* unmap the doorbell BAR mapping set up at init */
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
362
363/**
d5754ab8 364 * radeon_doorbell_get - Allocate a doorbell entry
75efdee1
AD
365 *
366 * @rdev: radeon_device pointer
d5754ab8 367 * @doorbell: doorbell index
75efdee1 368 *
d5754ab8 369 * Allocate a doorbell for use by the driver (all asics).
75efdee1
AD
370 * Returns 0 on success or -EINVAL on failure.
371 */
372int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
373{
d5754ab8
AL
374 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
375 if (offset < rdev->doorbell.num_doorbells) {
376 __set_bit(offset, rdev->doorbell.used);
377 *doorbell = offset;
378 return 0;
379 } else {
380 return -EINVAL;
75efdee1 381 }
75efdee1
AD
382}
383
/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics).
 * Out-of-range indices are silently ignored.
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}
397
ebff8453
OG
/**
 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
 * setup KFD
 *
 * @rdev: radeon_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for radeon.
 *
 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
 * takes doorbells required for its own rings and reports the setup to KFD.
 * Radeon reserved doorbells are at the start of the doorbell aperture.
 */
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/* The first num_doorbells are used by radeon.
	 * KFD takes whatever's left in the aperture. */
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = rdev->doorbell.base;
		*aperture_size = rdev->doorbell.size;
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
	} else {
		/* nothing left over for KFD; report an empty aperture */
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
428
0c195119
AD
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

435
/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 * Only clears the driver-side enable flag; no hardware access here.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
447
0c195119
AD
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* unmap and unpin only if the reserve succeeds; the BO
		 * reference is dropped either way */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
470
0c195119
AD
/**
 * radeon_wb_init- Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics),
 * then decides per-asic whether writeback is usable.
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		/* one GPU page of GTT memory holds all writeback slots */
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		/* pin the BO in GTT so the GPU address stays valid */
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		/* map it for CPU access */
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
546
d594e46a
JG
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the unvisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM than we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in a case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* user-requested cap via the vramlimit module parameter, in bytes */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		/* VRAM would run past the MC address mask */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		/* VRAM range would overlap the AGP GTT range */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 611
d594e46a
JG
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* aligned space available after (af) and before (bf) the VRAM range */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		/* place GTT below VRAM */
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		/* place GTT above VRAM */
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
647
771fe6b9
JG
/*
 * GPU helper functions.
 */
05082b8b
AD
651
/**
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	/* hypervisor presence bit set by the host */
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	/* no detection available on non-x86; assume bare metal */
	return false;
#endif
}
667
0c195119
AD
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics), by probing the
 * CRTC enable bits and, failing that, the memory-size register.
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* for pass through, always force asic_init for CI */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* asics without display hardware have no CRTCs to probe */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
736
0c195119
AD
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz: divide the raw 10 kHz clock values by 100 */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
764
0c195119
AD
765/**
766 * radeon_boot_test_post_card - check and possibly initialize the hw
767 *
768 * @rdev: radeon_device pointer
769 *
770 * Check if the asic is initialized and if not, attempt to initialize
771 * it (all asics).
772 * Returns true if initialized or false if not.
773 */
72542d77
DA
774bool radeon_boot_test_post_card(struct radeon_device *rdev)
775{
776 if (radeon_card_posted(rdev))
777 return true;
778
779 if (rdev->bios) {
780 DRM_INFO("GPU not posted. posting now...\n");
781 if (rdev->is_atom_bios)
782 atom_asic_init(rdev->mode_info.atom_context);
783 else
784 radeon_combios_asic_init(rdev->ddev);
785 return true;
786 } else {
787 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
788 return false;
789 }
790}
791
0c195119
AD
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated: nothing to do */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	/* pre-compute the GART entry for the dummy page */
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}
821
0c195119
AD
822/**
823 * radeon_dummy_page_fini - free dummy page used by the driver
824 *
825 * @rdev: radeon_device pointer
826 *
827 * Frees the dummy page used by the driver (all asics).
828 */
3ce0a23d
JG
829void radeon_dummy_page_fini(struct radeon_device *rdev)
830{
831 if (rdev->dummy_page.page == NULL)
832 return;
833 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
834 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
835 __free_page(rdev->dummy_page.page);
836 rdev->dummy_page.page = NULL;
837}
838
771fe6b9 839
771fe6b9 840/* ATOM accessor methods */
0c195119
AD
841/*
842 * ATOM is an interpreted byte code stored in tables in the vbios. The
843 * driver registers callbacks to access registers and the interpreter
844 * in the driver parses the tables and executes then to program specific
845 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
846 * atombios.h, and atom.c
847 */
848
849/**
850 * cail_pll_read - read PLL register
851 *
852 * @info: atom card_info pointer
853 * @reg: PLL register offset
854 *
855 * Provides a PLL register accessor for the atom interpreter (r4xx+).
856 * Returns the value of the PLL register.
857 */
771fe6b9
JG
858static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
859{
860 struct radeon_device *rdev = info->dev->dev_private;
861 uint32_t r;
862
863 r = rdev->pll_rreg(rdev, reg);
864 return r;
865}
866
0c195119
AD
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
882
0c195119
AD
883/**
884 * cail_mc_read - read MC (Memory Controller) register
885 *
886 * @info: atom card_info pointer
887 * @reg: MC register offset
888 *
889 * Provides an MC register accessor for the atom interpreter (r4xx+).
890 * Returns the value of the MC register.
891 */
771fe6b9
JG
892static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
893{
894 struct radeon_device *rdev = info->dev->dev_private;
895 uint32_t r;
896
897 r = rdev->mc_rreg(rdev, reg);
898 return r;
899}
900
0c195119
AD
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
916
0c195119
AD
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords; converted to bytes below)
 * @val: value to write to the register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
932
0c195119
AD
933/**
934 * cail_reg_read - read MMIO register
935 *
936 * @info: atom card_info pointer
937 * @reg: MMIO register offset
938 *
939 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
940 * Returns the value of the MMIO register.
941 */
771fe6b9
JG
942static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
943{
944 struct radeon_device *rdev = info->dev->dev_private;
945 uint32_t r;
946
947 r = RREG32(reg*4);
948 return r;
949}
950
0c195119
AD
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords; converted to bytes below)
 * @val: value to write to the register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
966
0c195119
AD
967/**
968 * cail_ioreg_read - read IO register
969 *
970 * @info: atom card_info pointer
971 * @reg: IO register offset
972 *
973 * Provides an IO register accessor for the atom interpreter (r4xx+).
974 * Returns the value of the IO register.
975 */
351a52a2
AD
976static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
977{
978 struct radeon_device *rdev = info->dev->dev_private;
979 uint32_t r;
980
981 r = RREG32_IO(reg*4);
982 return r;
983}
984
0c195119
AD
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops; fall back to MMIO when the I/O BAR is absent */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1033
0c195119
AD
1034/**
1035 * radeon_atombios_fini - free the driver info and callbacks for atombios
1036 *
1037 * @rdev: radeon_device pointer
1038 *
1039 * Frees the driver info and register access callbacks for the ATOM
1040 * interpreter (r4xx+).
1041 * Called at driver shutdown.
1042 */
771fe6b9
JG
1043void radeon_atombios_fini(struct radeon_device *rdev)
1044{
4a04a844
JG
1045 if (rdev->mode_info.atom_context) {
1046 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 1047 }
0e34d094
TG
1048 kfree(rdev->mode_info.atom_context);
1049 rdev->mode_info.atom_context = NULL;
61c4b24b 1050 kfree(rdev->mode_info.atom_card_info);
0e34d094 1051 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
1052}
1053
/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
1075
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* combios keeps no per-device state to release */
}
1087
0c195119
AD
1088/* if we get transitioned to only one device, take VGA back */
1089/**
1090 * radeon_vga_set_decode - enable/disable vga decode
1091 *
1092 * @cookie: radeon_device pointer
1093 * @state: enable/disable vga decode
1094 *
1095 * Enable/disable vga decode (all asics).
1096 * Returns VGA resource flags.
1097 */
28d52043
DA
1098static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1099{
1100 struct radeon_device *rdev = cookie;
28d52043
DA
1101 radeon_vga_set_state(rdev, state);
1102 if (state)
1103 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1104 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1105 else
1106 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1107}
c1176d6f 1108
1bcb04f7
CK
1109/**
1110 * radeon_check_pot_argument - check that argument is a power of two
1111 *
1112 * @arg: value to check
1113 *
1114 * Validates that a certain argument is a power of two (all asics).
1115 * Returns true if argument is valid.
1116 */
1117static bool radeon_check_pot_argument(int arg)
1118{
1119 return (arg & (arg - 1)) == 0;
1120}
1121
5e3c4f90
GG
1122/**
1123 * Determine a sensible default GART size according to ASIC family.
1124 *
1125 * @family ASIC family name
1126 */
1127static int radeon_gart_size_auto(enum radeon_family family)
1128{
1129 /* default to a larger gart size on newer asics */
1130 if (family >= CHIP_TAHITI)
1131 return 2048;
1132 else if (family >= CHIP_RV770)
1133 return 1024;
1134 else
1135 return 512;
1136}
1137
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* -1 means "pick automatically by ASIC family" */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* radeon_gart_size is in MB, gtt_size in bytes */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* radeon_vm_size is in GB; default to 4GB on invalid input */
	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	/* PT bits may not exceed 24 and must fit within the VM size
	 * (radeon_vm_size GB = radeon_vm_size * 1024 MB of address space) */
	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1235
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;

	/* PX (PowerXpress) devices are powered down via runtime PM instead */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some boards need a longer D3 delay to wake reliably */
		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		/* restore the original delay once resumed */
		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		/* stop output polling before tearing the hw down */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1277
0c195119
AD
1278/**
1279 * radeon_switcheroo_can_switch - see if switcheroo state can change
1280 *
1281 * @pdev: pci dev pointer
1282 *
1283 * Callback for the switcheroo driver. Check of the switcheroo
1284 * state can be changed.
1285 * Returns true if the state can be changed, false if not.
1286 */
6a9ee8af
DA
1287static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1288{
1289 struct drm_device *dev = pci_get_drvdata(pdev);
6a9ee8af 1290
fc8fd40e
DV
1291 /*
1292 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1293 * locking inversion with the driver load path. And the access here is
1294 * completely racy anyway. So don't bother with locking for now.
1295 */
1296 return dev->open_count == 0;
6a9ee8af
DA
1297}
1298
/* vga_switcheroo client callbacks; no reprobe hook is needed */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1304
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	/* provisional GTT size; radeon_check_arguments() recomputes it below */
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->grbm_idx_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* 40-bit mask rejected, fall back to 32 bits */
		rdev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("radeon: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		pr_warn("radeon: No coherent DMA available\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* the MMIO register BAR moved from 2 to 5 on CIK (Bonaire+) */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: map the first I/O BAR we find */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		/* debugfs failures are non-fatal */
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring have chew one packet at least. Hence here we stop
	 * and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* optional self-tests/benchmarks selected via module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1554
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	/* unregister our VGA decode callback */
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}
1583
1584
1585/*
1586 * Suspend & resume.
1587 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to shut the device down (D3hot)
 * @fbcon: suspend the fbdev console as well
 * @freeze: use an ASIC reset instead of a full shutdown (hibernate path)
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* nothing to do if switcheroo already powered us off */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR) {
		/* hibernate: reset the ASIC rather than powering it down */
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1693
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: re-enable and power up the PCI device
 * @fbcon: resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	/* nothing to do if switcheroo has us powered off */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* Only 27 bit offset for legacy cursor */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1806
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* pending commands saved from each ring so they can be replayed */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	/* another thread may have completed the reset already */
	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* back up unprocessed commands before the rings are torn down */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* replay saved commands on success, otherwise complete fences
	 * forcibly so waiters don't hang */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* keep readers out until the IB tests below have finished */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		/* IB tests failed after a reset that had work saved:
		 * signal the caller to retry */
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
1923
771fe6b9
JG
1924
1925/*
1926 * Debugfs
1927 */
771fe6b9
JG
1928int radeon_debugfs_add_files(struct radeon_device *rdev,
1929 struct drm_info_list *files,
1930 unsigned nfiles)
1931{
1932 unsigned i;
1933
4d8bf9ae
CK
1934 for (i = 0; i < rdev->debugfs_count; i++) {
1935 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1936 /* Already registered */
1937 return 0;
1938 }
1939 }
c245cb9e 1940
4d8bf9ae 1941 i = rdev->debugfs_count + 1;
c245cb9e
MW
1942 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1943 DRM_ERROR("Reached maximum number of debugfs components.\n");
1944 DRM_ERROR("Report so we increase "
3cf8bb1a 1945 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1946 return -EINVAL;
1947 }
4d8bf9ae
CK
1948 rdev->debugfs[rdev->debugfs_count].files = files;
1949 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1950 rdev->debugfs_count = i;
771fe6b9 1951#if defined(CONFIG_DEBUG_FS)
771fe6b9
JG
1952 drm_debugfs_create_files(files, nfiles,
1953 rdev->ddev->primary->debugfs_root,
1954 rdev->ddev->primary);
1955#endif
1956 return 0;
1957}