]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/gpu/drm/radeon/radeon_device.c
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso...
[mirror_ubuntu-artful-kernel.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
/* Human-readable ASIC names, indexed by enum radeon_family.
 * This table must stay in sync with the family enum. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
105
4807c5a8
AD
106#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
107#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
108
109struct radeon_px_quirk {
110 u32 chip_vendor;
111 u32 chip_device;
112 u32 subsys_vendor;
113 u32 subsys_device;
114 u32 px_quirk_flags;
115};
116
117static struct radeon_px_quirk radeon_px_quirk_list[] = {
118 /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
119 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
120 */
121 { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
122 /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
123 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
124 */
125 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
ff1b1294
AD
126 /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
127 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
128 */
129 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
4807c5a8
AD
130 /* macbook pro 8.2 */
131 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
132 { 0, 0, 0, 0, 0 },
133};
134
90c4cde9
AD
135bool radeon_is_px(struct drm_device *dev)
136{
137 struct radeon_device *rdev = dev->dev_private;
138
139 if (rdev->flags & RADEON_IS_PX)
140 return true;
141 return false;
142}
10ebc0bc 143
4807c5a8
AD
144static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
145{
146 struct radeon_px_quirk *p = radeon_px_quirk_list;
147
148 /* Apply PX quirks */
149 while (p && p->chip_device != 0) {
150 if (rdev->pdev->vendor == p->chip_vendor &&
151 rdev->pdev->device == p->chip_device &&
152 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
153 rdev->pdev->subsystem_device == p->subsys_device) {
154 rdev->px_quirk_flags = p->px_quirk_flags;
155 break;
156 }
157 ++p;
158 }
159
160 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
161 rdev->flags &= ~RADEON_IS_PX;
162}
163
2e1b65f9
AD
164/**
165 * radeon_program_register_sequence - program an array of registers.
166 *
167 * @rdev: radeon_device pointer
168 * @registers: pointer to the register array
169 * @array_size: size of the register array
170 *
171 * Programs an array or registers with and and or masks.
172 * This is a helper for setting golden registers.
173 */
174void radeon_program_register_sequence(struct radeon_device *rdev,
175 const u32 *registers,
176 const u32 array_size)
177{
178 u32 tmp, reg, and_mask, or_mask;
179 int i;
180
181 if (array_size % 3)
182 return;
183
184 for (i = 0; i < array_size; i +=3) {
185 reg = registers[i + 0];
186 and_mask = registers[i + 1];
187 or_mask = registers[i + 2];
188
189 if (and_mask == 0xffffffff) {
190 tmp = or_mask;
191 } else {
192 tmp = RREG32(reg);
193 tmp &= ~and_mask;
194 tmp |= or_mask;
195 }
196 WREG32(reg, tmp);
197 }
198}
199
1a0041b8
AD
200void radeon_pci_config_reset(struct radeon_device *rdev)
201{
202 pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
203}
204
0c195119
AD
205/**
206 * radeon_surface_init - Clear GPU surface registers.
207 *
208 * @rdev: radeon_device pointer
209 *
210 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 211 */
3ce0a23d 212void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
213{
214 /* FIXME: check this out */
215 if (rdev->family < CHIP_R600) {
216 int i;
217
550e2d92
DA
218 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
219 if (rdev->surface_regs[i].bo)
220 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
221 else
222 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 223 }
e024e110
DA
224 /* enable surfaces */
225 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
226 }
227}
228
771fe6b9
JG
229/*
230 * GPU scratch registers helpers function.
231 */
0c195119
AD
232/**
233 * radeon_scratch_init - Init scratch register driver information.
234 *
235 * @rdev: radeon_device pointer
236 *
237 * Init CP scratch register driver information (r1xx-r5xx)
238 */
3ce0a23d 239void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
240{
241 int i;
242
243 /* FIXME: check this out */
244 if (rdev->family < CHIP_R300) {
245 rdev->scratch.num_reg = 5;
246 } else {
247 rdev->scratch.num_reg = 7;
248 }
724c80e1 249 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
250 for (i = 0; i < rdev->scratch.num_reg; i++) {
251 rdev->scratch.free[i] = true;
724c80e1 252 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
253 }
254}
255
0c195119
AD
256/**
257 * radeon_scratch_get - Allocate a scratch register
258 *
259 * @rdev: radeon_device pointer
260 * @reg: scratch register mmio offset
261 *
262 * Allocate a CP scratch register for use by the driver (all asics).
263 * Returns 0 on success or -EINVAL on failure.
264 */
771fe6b9
JG
265int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
266{
267 int i;
268
269 for (i = 0; i < rdev->scratch.num_reg; i++) {
270 if (rdev->scratch.free[i]) {
271 rdev->scratch.free[i] = false;
272 *reg = rdev->scratch.reg[i];
273 return 0;
274 }
275 }
276 return -EINVAL;
277}
278
0c195119
AD
279/**
280 * radeon_scratch_free - Free a scratch register
281 *
282 * @rdev: radeon_device pointer
283 * @reg: scratch register mmio offset
284 *
285 * Free a CP scratch register allocated for use by the driver (all asics)
286 */
771fe6b9
JG
287void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
288{
289 int i;
290
291 for (i = 0; i < rdev->scratch.num_reg; i++) {
292 if (rdev->scratch.reg[i] == reg) {
293 rdev->scratch.free[i] = true;
294 return;
295 }
296 }
297}
298
75efdee1
AD
299/*
300 * GPU doorbell aperture helpers function.
301 */
302/**
303 * radeon_doorbell_init - Init doorbell driver information.
304 *
305 * @rdev: radeon_device pointer
306 *
307 * Init doorbell driver information (CIK)
308 * Returns 0 on success, error on failure.
309 */
28f5a6cd 310static int radeon_doorbell_init(struct radeon_device *rdev)
75efdee1 311{
75efdee1
AD
312 /* doorbell bar mapping */
313 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
314 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
315
d5754ab8
AL
316 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
317 if (rdev->doorbell.num_doorbells == 0)
318 return -EINVAL;
75efdee1 319
d5754ab8 320 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
75efdee1
AD
321 if (rdev->doorbell.ptr == NULL) {
322 return -ENOMEM;
323 }
324 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
325 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
326
d5754ab8 327 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
75efdee1 328
75efdee1
AD
329 return 0;
330}
331
332/**
333 * radeon_doorbell_fini - Tear down doorbell driver information.
334 *
335 * @rdev: radeon_device pointer
336 *
337 * Tear down doorbell driver information (CIK)
338 */
28f5a6cd 339static void radeon_doorbell_fini(struct radeon_device *rdev)
75efdee1
AD
340{
341 iounmap(rdev->doorbell.ptr);
342 rdev->doorbell.ptr = NULL;
343}
344
345/**
d5754ab8 346 * radeon_doorbell_get - Allocate a doorbell entry
75efdee1
AD
347 *
348 * @rdev: radeon_device pointer
d5754ab8 349 * @doorbell: doorbell index
75efdee1 350 *
d5754ab8 351 * Allocate a doorbell for use by the driver (all asics).
75efdee1
AD
352 * Returns 0 on success or -EINVAL on failure.
353 */
354int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
355{
d5754ab8
AL
356 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
357 if (offset < rdev->doorbell.num_doorbells) {
358 __set_bit(offset, rdev->doorbell.used);
359 *doorbell = offset;
360 return 0;
361 } else {
362 return -EINVAL;
75efdee1 363 }
75efdee1
AD
364}
365
366/**
d5754ab8 367 * radeon_doorbell_free - Free a doorbell entry
75efdee1
AD
368 *
369 * @rdev: radeon_device pointer
d5754ab8 370 * @doorbell: doorbell index
75efdee1 371 *
d5754ab8 372 * Free a doorbell allocated for use by the driver (all asics)
75efdee1
AD
373 */
374void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
375{
d5754ab8
AL
376 if (doorbell < rdev->doorbell.num_doorbells)
377 __clear_bit(doorbell, rdev->doorbell.used);
75efdee1
AD
378}
379
ebff8453
OG
380/**
381 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
382 * setup KFD
383 *
384 * @rdev: radeon_device pointer
385 * @aperture_base: output returning doorbell aperture base physical address
386 * @aperture_size: output returning doorbell aperture size in bytes
387 * @start_offset: output returning # of doorbell bytes reserved for radeon.
388 *
389 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
390 * takes doorbells required for its own rings and reports the setup to KFD.
391 * Radeon reserved doorbells are at the start of the doorbell aperture.
392 */
393void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
394 phys_addr_t *aperture_base,
395 size_t *aperture_size,
396 size_t *start_offset)
397{
398 /* The first num_doorbells are used by radeon.
399 * KFD takes whatever's left in the aperture. */
400 if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
401 *aperture_base = rdev->doorbell.base;
402 *aperture_size = rdev->doorbell.size;
403 *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
404 } else {
405 *aperture_base = 0;
406 *aperture_size = 0;
407 *start_offset = 0;
408 }
409}
410
0c195119
AD
411/*
412 * radeon_wb_*()
413 * Writeback is the the method by which the the GPU updates special pages
414 * in memory with the status of certain GPU events (fences, ring pointers,
415 * etc.).
416 */
417
418/**
419 * radeon_wb_disable - Disable Writeback
420 *
421 * @rdev: radeon_device pointer
422 *
423 * Disables Writeback (all asics). Used for suspend.
424 */
724c80e1
AD
425void radeon_wb_disable(struct radeon_device *rdev)
426{
724c80e1
AD
427 rdev->wb.enabled = false;
428}
429
0c195119
AD
430/**
431 * radeon_wb_fini - Disable Writeback and free memory
432 *
433 * @rdev: radeon_device pointer
434 *
435 * Disables Writeback and frees the Writeback memory (all asics).
436 * Used at driver shutdown.
437 */
724c80e1
AD
438void radeon_wb_fini(struct radeon_device *rdev)
439{
440 radeon_wb_disable(rdev);
441 if (rdev->wb.wb_obj) {
089920f2
JG
442 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
443 radeon_bo_kunmap(rdev->wb.wb_obj);
444 radeon_bo_unpin(rdev->wb.wb_obj);
445 radeon_bo_unreserve(rdev->wb.wb_obj);
446 }
724c80e1
AD
447 radeon_bo_unref(&rdev->wb.wb_obj);
448 rdev->wb.wb = NULL;
449 rdev->wb.wb_obj = NULL;
450 }
451}
452
0c195119
AD
453/**
454 * radeon_wb_init- Init Writeback driver info and allocate memory
455 *
456 * @rdev: radeon_device pointer
457 *
458 * Disables Writeback and frees the Writeback memory (all asics).
459 * Used at driver startup.
460 * Returns 0 on success or an -error on failure.
461 */
724c80e1
AD
462int radeon_wb_init(struct radeon_device *rdev)
463{
464 int r;
465
466 if (rdev->wb.wb_obj == NULL) {
441921d5 467 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
831b6966 468 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
02376d82 469 &rdev->wb.wb_obj);
724c80e1
AD
470 if (r) {
471 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
472 return r;
473 }
089920f2
JG
474 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
475 if (unlikely(r != 0)) {
476 radeon_wb_fini(rdev);
477 return r;
478 }
479 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
480 &rdev->wb.gpu_addr);
481 if (r) {
482 radeon_bo_unreserve(rdev->wb.wb_obj);
483 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
484 radeon_wb_fini(rdev);
485 return r;
486 }
487 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
724c80e1 488 radeon_bo_unreserve(rdev->wb.wb_obj);
089920f2
JG
489 if (r) {
490 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
491 radeon_wb_fini(rdev);
492 return r;
493 }
724c80e1
AD
494 }
495
e6ba7599
AD
496 /* clear wb memory */
497 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
d0f8a854
AD
498 /* disable event_write fences */
499 rdev->wb.use_event = false;
724c80e1 500 /* disabled via module param */
3b7a2b24 501 if (radeon_no_wb == 1) {
724c80e1 502 rdev->wb.enabled = false;
3b7a2b24 503 } else {
724c80e1 504 if (rdev->flags & RADEON_IS_AGP) {
28eebb70
AD
505 /* often unreliable on AGP */
506 rdev->wb.enabled = false;
507 } else if (rdev->family < CHIP_R300) {
508 /* often unreliable on pre-r300 */
724c80e1 509 rdev->wb.enabled = false;
d0f8a854 510 } else {
724c80e1 511 rdev->wb.enabled = true;
d0f8a854 512 /* event_write fences are only available on r600+ */
3b7a2b24 513 if (rdev->family >= CHIP_R600) {
d0f8a854 514 rdev->wb.use_event = true;
3b7a2b24 515 }
d0f8a854 516 }
724c80e1 517 }
c994ead6
AD
518 /* always use writeback/events on NI, APUs */
519 if (rdev->family >= CHIP_PALM) {
7d52785d
AD
520 rdev->wb.enabled = true;
521 rdev->wb.use_event = true;
522 }
724c80e1
AD
523
524 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
525
526 return 0;
527}
528
d594e46a
JG
529/**
530 * radeon_vram_location - try to find VRAM location
531 * @rdev: radeon device structure holding all necessary informations
532 * @mc: memory controller structure holding memory informations
533 * @base: base address at which to put VRAM
534 *
535 * Function will place try to place VRAM at base address provided
536 * as parameter (which is so far either PCI aperture address or
537 * for IGP TOM base address).
538 *
539 * If there is not enough space to fit the unvisible VRAM in the 32bits
540 * address space then we limit the VRAM size to the aperture.
541 *
542 * If we are using AGP and if the AGP aperture doesn't allow us to have
543 * room for all the VRAM than we restrict the VRAM to the PCI aperture
544 * size and print a warning.
545 *
546 * This function will never fails, worst case are limiting VRAM.
547 *
548 * Note: GTT start, end, size should be initialized before calling this
549 * function on AGP platform.
550 *
25985edc 551 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
d594e46a
JG
552 * this shouldn't be a problem as we are using the PCI aperture as a reference.
553 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
554 * not IGP.
555 *
556 * Note: we use mc_vram_size as on some board we need to program the mc to
557 * cover the whole aperture even if VRAM size is inferior to aperture size
558 * Novell bug 204882 + along with lots of ubuntu ones
559 *
560 * Note: when limiting vram it's safe to overwritte real_vram_size because
561 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
562 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
563 * ones)
564 *
565 * Note: IGP TOM addr should be the same as the aperture addr, we don't
566 * explicitly check for that thought.
567 *
568 * FIXME: when reducing VRAM size align new size on power of 2.
771fe6b9 569 */
d594e46a 570void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
771fe6b9 571{
1bcb04f7
CK
572 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
573
d594e46a 574 mc->vram_start = base;
9ed8b1f9 575 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
d594e46a
JG
576 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
577 mc->real_vram_size = mc->aper_size;
578 mc->mc_vram_size = mc->aper_size;
579 }
580 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2cbeb4ef 581 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
d594e46a
JG
582 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
583 mc->real_vram_size = mc->aper_size;
584 mc->mc_vram_size = mc->aper_size;
585 }
586 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1bcb04f7
CK
587 if (limit && limit < mc->real_vram_size)
588 mc->real_vram_size = limit;
dd7cc55a 589 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
d594e46a
JG
590 mc->mc_vram_size >> 20, mc->vram_start,
591 mc->vram_end, mc->real_vram_size >> 20);
592}
771fe6b9 593
d594e46a
JG
594/**
595 * radeon_gtt_location - try to find GTT location
596 * @rdev: radeon device structure holding all necessary informations
597 * @mc: memory controller structure holding memory informations
598 *
599 * Function will place try to place GTT before or after VRAM.
600 *
601 * If GTT size is bigger than space left then we ajust GTT size.
602 * Thus function will never fails.
603 *
604 * FIXME: when reducing GTT size align new size on power of 2.
605 */
606void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
607{
608 u64 size_af, size_bf;
609
9ed8b1f9 610 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
8d369bb1 611 size_bf = mc->vram_start & ~mc->gtt_base_align;
d594e46a
JG
612 if (size_bf > size_af) {
613 if (mc->gtt_size > size_bf) {
614 dev_warn(rdev->dev, "limiting GTT\n");
615 mc->gtt_size = size_bf;
771fe6b9 616 }
8d369bb1 617 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
771fe6b9 618 } else {
d594e46a
JG
619 if (mc->gtt_size > size_af) {
620 dev_warn(rdev->dev, "limiting GTT\n");
621 mc->gtt_size = size_af;
622 }
8d369bb1 623 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
771fe6b9 624 }
d594e46a 625 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dd7cc55a 626 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
d594e46a 627 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
771fe6b9
JG
628}
629
771fe6b9
JG
630/*
631 * GPU helpers function.
632 */
05082b8b
AD
633
634/**
635 * radeon_device_is_virtual - check if we are running is a virtual environment
636 *
637 * Check if the asic has been passed through to a VM (all asics).
638 * Used at driver startup.
639 * Returns true if virtual or false if not.
640 */
641static bool radeon_device_is_virtual(void)
642{
643#ifdef CONFIG_X86
644 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
645#else
646 return false;
647#endif
648}
649
0c195119
AD
650/**
651 * radeon_card_posted - check if the hw has already been initialized
652 *
653 * @rdev: radeon_device pointer
654 *
655 * Check if the asic has been initialized (all asics).
656 * Used at driver startup.
657 * Returns true if initialized or false if not.
658 */
9f022ddf 659bool radeon_card_posted(struct radeon_device *rdev)
771fe6b9
JG
660{
661 uint32_t reg;
662
05082b8b
AD
663 /* for pass through, always force asic_init */
664 if (radeon_device_is_virtual())
665 return false;
666
50a583f6 667 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
83e68189 668 if (efi_enabled(EFI_BOOT) &&
50a583f6
AD
669 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
670 (rdev->family < CHIP_R600))
bcc65fd8
MG
671 return false;
672
2cf3a4fc
AD
673 if (ASIC_IS_NODCE(rdev))
674 goto check_memsize;
675
771fe6b9 676 /* first check CRTCs */
09fb8bd1 677 if (ASIC_IS_DCE4(rdev)) {
18007401
AD
678 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
679 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
09fb8bd1
AD
680 if (rdev->num_crtc >= 4) {
681 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
682 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
683 }
684 if (rdev->num_crtc >= 6) {
685 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
686 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
687 }
bcc1c2a1
AD
688 if (reg & EVERGREEN_CRTC_MASTER_EN)
689 return true;
690 } else if (ASIC_IS_AVIVO(rdev)) {
771fe6b9
JG
691 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
692 RREG32(AVIVO_D2CRTC_CONTROL);
693 if (reg & AVIVO_CRTC_EN) {
694 return true;
695 }
696 } else {
697 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
698 RREG32(RADEON_CRTC2_GEN_CNTL);
699 if (reg & RADEON_CRTC_EN) {
700 return true;
701 }
702 }
703
2cf3a4fc 704check_memsize:
771fe6b9
JG
705 /* then check MEM_SIZE, in case the crtcs are off */
706 if (rdev->family >= CHIP_R600)
707 reg = RREG32(R600_CONFIG_MEMSIZE);
708 else
709 reg = RREG32(RADEON_CONFIG_MEMSIZE);
710
711 if (reg)
712 return true;
713
714 return false;
715
716}
717
0c195119
AD
718/**
719 * radeon_update_bandwidth_info - update display bandwidth params
720 *
721 * @rdev: radeon_device pointer
722 *
723 * Used when sclk/mclk are switched or display modes are set.
724 * params are used to calculate display watermarks (all asics)
725 */
f47299c5
AD
726void radeon_update_bandwidth_info(struct radeon_device *rdev)
727{
728 fixed20_12 a;
8807286e
AD
729 u32 sclk = rdev->pm.current_sclk;
730 u32 mclk = rdev->pm.current_mclk;
f47299c5 731
8807286e
AD
732 /* sclk/mclk in Mhz */
733 a.full = dfixed_const(100);
734 rdev->pm.sclk.full = dfixed_const(sclk);
735 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
736 rdev->pm.mclk.full = dfixed_const(mclk);
737 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
f47299c5 738
8807286e 739 if (rdev->flags & RADEON_IS_IGP) {
68adac5e 740 a.full = dfixed_const(16);
f47299c5 741 /* core_bandwidth = sclk(Mhz) * 16 */
68adac5e 742 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
f47299c5
AD
743 }
744}
745
0c195119
AD
746/**
747 * radeon_boot_test_post_card - check and possibly initialize the hw
748 *
749 * @rdev: radeon_device pointer
750 *
751 * Check if the asic is initialized and if not, attempt to initialize
752 * it (all asics).
753 * Returns true if initialized or false if not.
754 */
72542d77
DA
755bool radeon_boot_test_post_card(struct radeon_device *rdev)
756{
757 if (radeon_card_posted(rdev))
758 return true;
759
760 if (rdev->bios) {
761 DRM_INFO("GPU not posted. posting now...\n");
762 if (rdev->is_atom_bios)
763 atom_asic_init(rdev->mode_info.atom_context);
764 else
765 radeon_combios_asic_init(rdev->ddev);
766 return true;
767 } else {
768 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
769 return false;
770 }
771}
772
0c195119
AD
773/**
774 * radeon_dummy_page_init - init dummy page used by the driver
775 *
776 * @rdev: radeon_device pointer
777 *
778 * Allocate the dummy page used by the driver (all asics).
779 * This dummy page is used by the driver as a filler for gart entries
780 * when pages are taken out of the GART
781 * Returns 0 on sucess, -ENOMEM on failure.
782 */
3ce0a23d
JG
783int radeon_dummy_page_init(struct radeon_device *rdev)
784{
82568565
DA
785 if (rdev->dummy_page.page)
786 return 0;
3ce0a23d
JG
787 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
788 if (rdev->dummy_page.page == NULL)
789 return -ENOMEM;
790 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
791 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
a30f6fb7
BH
792 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
793 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
3ce0a23d
JG
794 __free_page(rdev->dummy_page.page);
795 rdev->dummy_page.page = NULL;
796 return -ENOMEM;
797 }
cb658906
MD
798 rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
799 RADEON_GART_PAGE_DUMMY);
3ce0a23d
JG
800 return 0;
801}
802
0c195119
AD
803/**
804 * radeon_dummy_page_fini - free dummy page used by the driver
805 *
806 * @rdev: radeon_device pointer
807 *
808 * Frees the dummy page used by the driver (all asics).
809 */
3ce0a23d
JG
810void radeon_dummy_page_fini(struct radeon_device *rdev)
811{
812 if (rdev->dummy_page.page == NULL)
813 return;
814 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
815 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
816 __free_page(rdev->dummy_page.page);
817 rdev->dummy_page.page = NULL;
818}
819
771fe6b9 820
771fe6b9 821/* ATOM accessor methods */
0c195119
AD
822/*
823 * ATOM is an interpreted byte code stored in tables in the vbios. The
824 * driver registers callbacks to access registers and the interpreter
825 * in the driver parses the tables and executes then to program specific
826 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
827 * atombios.h, and atom.c
828 */
829
830/**
831 * cail_pll_read - read PLL register
832 *
833 * @info: atom card_info pointer
834 * @reg: PLL register offset
835 *
836 * Provides a PLL register accessor for the atom interpreter (r4xx+).
837 * Returns the value of the PLL register.
838 */
771fe6b9
JG
839static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
840{
841 struct radeon_device *rdev = info->dev->dev_private;
842 uint32_t r;
843
844 r = rdev->pll_rreg(rdev, reg);
845 return r;
846}
847
0c195119
AD
848/**
849 * cail_pll_write - write PLL register
850 *
851 * @info: atom card_info pointer
852 * @reg: PLL register offset
853 * @val: value to write to the pll register
854 *
855 * Provides a PLL register accessor for the atom interpreter (r4xx+).
856 */
771fe6b9
JG
857static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
858{
859 struct radeon_device *rdev = info->dev->dev_private;
860
861 rdev->pll_wreg(rdev, reg, val);
862}
863
0c195119
AD
864/**
865 * cail_mc_read - read MC (Memory Controller) register
866 *
867 * @info: atom card_info pointer
868 * @reg: MC register offset
869 *
870 * Provides an MC register accessor for the atom interpreter (r4xx+).
871 * Returns the value of the MC register.
872 */
771fe6b9
JG
873static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
874{
875 struct radeon_device *rdev = info->dev->dev_private;
876 uint32_t r;
877
878 r = rdev->mc_rreg(rdev, reg);
879 return r;
880}
881
0c195119
AD
882/**
883 * cail_mc_write - write MC (Memory Controller) register
884 *
885 * @info: atom card_info pointer
886 * @reg: MC register offset
887 * @val: value to write to the pll register
888 *
889 * Provides a MC register accessor for the atom interpreter (r4xx+).
890 */
771fe6b9
JG
891static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
892{
893 struct radeon_device *rdev = info->dev->dev_private;
894
895 rdev->mc_wreg(rdev, reg, val);
896}
897
0c195119
AD
898/**
899 * cail_reg_write - write MMIO register
900 *
901 * @info: atom card_info pointer
902 * @reg: MMIO register offset
903 * @val: value to write to the pll register
904 *
905 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
906 */
771fe6b9
JG
907static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
908{
909 struct radeon_device *rdev = info->dev->dev_private;
910
911 WREG32(reg*4, val);
912}
913
0c195119
AD
914/**
915 * cail_reg_read - read MMIO register
916 *
917 * @info: atom card_info pointer
918 * @reg: MMIO register offset
919 *
920 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
921 * Returns the value of the MMIO register.
922 */
771fe6b9
JG
923static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
924{
925 struct radeon_device *rdev = info->dev->dev_private;
926 uint32_t r;
927
928 r = RREG32(reg*4);
929 return r;
930}
931
0c195119
AD
932/**
933 * cail_ioreg_write - write IO register
934 *
935 * @info: atom card_info pointer
936 * @reg: IO register offset
937 * @val: value to write to the pll register
938 *
939 * Provides a IO register accessor for the atom interpreter (r4xx+).
940 */
351a52a2
AD
941static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
942{
943 struct radeon_device *rdev = info->dev->dev_private;
944
945 WREG32_IO(reg*4, val);
946}
947
0c195119
AD
948/**
949 * cail_ioreg_read - read IO register
950 *
951 * @info: atom card_info pointer
952 * @reg: IO register offset
953 *
954 * Provides an IO register accessor for the atom interpreter (r4xx+).
955 * Returns the value of the IO register.
956 */
351a52a2
AD
957static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
958{
959 struct radeon_device *rdev = info->dev->dev_private;
960 uint32_t r;
961
962 r = RREG32_IO(reg*4);
963 return r;
964}
965
0c195119
AD
966/**
967 * radeon_atombios_init - init the driver info and callbacks for atombios
968 *
969 * @rdev: radeon_device pointer
970 *
971 * Initializes the driver info and register access callbacks for the
972 * ATOM interpreter (r4xx+).
973 * Returns 0 on success, -ENOMEM on failure.
974 * Called at driver startup.
975 */
771fe6b9
JG
976int radeon_atombios_init(struct radeon_device *rdev)
977{
61c4b24b
MF
978 struct card_info *atom_card_info =
979 kzalloc(sizeof(struct card_info), GFP_KERNEL);
980
981 if (!atom_card_info)
982 return -ENOMEM;
983
984 rdev->mode_info.atom_card_info = atom_card_info;
985 atom_card_info->dev = rdev->ddev;
986 atom_card_info->reg_read = cail_reg_read;
987 atom_card_info->reg_write = cail_reg_write;
351a52a2
AD
988 /* needed for iio ops */
989 if (rdev->rio_mem) {
990 atom_card_info->ioreg_read = cail_ioreg_read;
991 atom_card_info->ioreg_write = cail_ioreg_write;
992 } else {
	/* no PCI I/O BAR: fall back to MMIO accessors for the IIO callbacks */
993 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
994 atom_card_info->ioreg_read = cail_reg_read;
995 atom_card_info->ioreg_write = cail_reg_write;
996 }
61c4b24b
MF
997 atom_card_info->mc_read = cail_mc_read;
998 atom_card_info->mc_write = cail_mc_write;
999 atom_card_info->pll_read = cail_pll_read;
1000 atom_card_info->pll_write = cail_pll_write;
1001
1002 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
0e34d094
TG
1003 if (!rdev->mode_info.atom_context) {
	/* parse failed: the common fini path frees atom_card_info for us */
1004 radeon_atombios_fini(rdev);
1005 return -ENOMEM;
1006 }
1007
c31ad97f 1008 mutex_init(&rdev->mode_info.atom_context->mutex);
1c949842 1009 mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
771fe6b9 1010 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
d904ef9b 1011 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
771fe6b9
JG
1012 return 0;
1013}
1014
0c195119
AD
1015/**
1016 * radeon_atombios_fini - free the driver info and callbacks for atombios
1017 *
1018 * @rdev: radeon_device pointer
1019 *
1020 * Frees the driver info and register access callbacks for the ATOM
1021 * interpreter (r4xx+).
1022 * Called at driver shutdown.
1023 */
771fe6b9
JG
1024void radeon_atombios_fini(struct radeon_device *rdev)
1025{
4a04a844
JG
1026 if (rdev->mode_info.atom_context) {
1027 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 1028 }
0e34d094
TG
1029 kfree(rdev->mode_info.atom_context);
1030 rdev->mode_info.atom_context = NULL;
61c4b24b 1031 kfree(rdev->mode_info.atom_card_info);
0e34d094 1032 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
1033}
1034
0c195119
AD
1035/* COMBIOS */
1036/*
1037 * COMBIOS is the bios format prior to ATOM. It provides
1038 * command tables similar to ATOM, but doesn't have a unified
1039 * parser. See radeon_combios.c
1040 */
1041
1042/**
1043 * radeon_combios_init - init the driver info for combios
1044 *
1045 * @rdev: radeon_device pointer
1046 *
1047 * Initializes the driver info for combios (r1xx-r3xx).
1048 * Returns 0 on success.
1049 * Called at driver startup.
1050 */
771fe6b9
JG
1051int radeon_combios_init(struct radeon_device *rdev)
1052{
	/* combios has no parser context to allocate; just prime scratch regs */
1053 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
1054 return 0;
1055}
1056
0c195119
AD
1057/**
1058 * radeon_combios_fini - free the driver info for combios
1059 *
1060 * @rdev: radeon_device pointer
1061 *
1062 * Frees the driver info for combios (r1xx-r3xx).
1063 * Called at driver shutdown.
1064 */
771fe6b9
JG
1065void radeon_combios_fini(struct radeon_device *rdev)
1066{
	/* intentionally empty: radeon_combios_init() allocates no state */
1067}
1068
0c195119
AD
1069/* if we get transitioned to only one device, take VGA back */
1070/**
1071 * radeon_vga_set_decode - enable/disable vga decode
1072 *
1073 * @cookie: radeon_device pointer
1074 * @state: enable/disable vga decode
1075 *
1076 * Enable/disable vga decode (all asics).
1077 * Returns VGA resource flags.
1078 */
28d52043
DA
1079static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1080{
1081 struct radeon_device *rdev = cookie;
28d52043
DA
1082 radeon_vga_set_state(rdev, state);
1083 if (state)
1084 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1085 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1086 else
1087 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1088}
c1176d6f 1089
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 *
 * Clearing the lowest set bit of a power of two yields zero; note that
 * this also accepts 0, matching the original behaviour (callers treat
 * 0 as "unset").
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
1102
5e3c4f90
GG
1103/**
1104 * Determine a sensible default GART size according to ASIC family.
1105 *
1106 * @family ASIC family name
1107 */
1108static int radeon_gart_size_auto(enum radeon_family family)
1109{
1110 /* default to a larger gart size on newer asics */
1111 if (family >= CHIP_TAHITI)
1112 return 2048;
1113 else if (family >= CHIP_RV770)
1114 return 1024;
1115 else
1116 return 512;
1117}
1118
0c195119
AD
1119/**
1120 * radeon_check_arguments - validate module params
1121 *
1122 * @rdev: radeon_device pointer
1123 *
1124 * Validates certain module parameters and updates
1125 * the associated values used by the driver (all asics).
1126 * Invalid values are replaced by safe defaults with a warning,
1127 * never rejected outright.
 */
1109ca09 1127static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
1128{
1129 /* vramlimit must be a power of two */
1bcb04f7 1130 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
1131 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1132 radeon_vram_limit);
1133 radeon_vram_limit = 0;
36421338 1134 }
1bcb04f7 1135
	/* -1 means "auto": pick a family-dependent default (in MB) */
edcd26e8 1136 if (radeon_gart_size == -1) {
5e3c4f90 1137 radeon_gart_size = radeon_gart_size_auto(rdev->family);
edcd26e8 1138 }
36421338 1139 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 1140 if (radeon_gart_size < 32) {
edcd26e8 1141 dev_warn(rdev->dev, "gart size (%d) too small\n",
36421338 1142 radeon_gart_size);
5e3c4f90 1143 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1bcb04f7 1144 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
1145 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1146 radeon_gart_size);
5e3c4f90 1147 radeon_gart_size = radeon_gart_size_auto(rdev->family);
36421338 1148 }
1bcb04f7
CK
	/* radeon_gart_size is in MB; << 20 converts to bytes */
1149 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1150
36421338
JG
1151 /* AGP mode can only be -1, 1, 2, 4, 8 */
1152 switch (radeon_agpmode) {
1153 case -1:
1154 case 0:
1155 case 1:
1156 case 2:
1157 case 4:
1158 case 8:
1159 break;
1160 default:
1161 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1162 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1163 radeon_agpmode = 0;
1164 break;
1165 }
c1c44132
CK
1166
	/* radeon_vm_size is in GB (see the min-1GB / max-1TB checks below) */
1167 if (!radeon_check_pot_argument(radeon_vm_size)) {
1168 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1169 radeon_vm_size);
20b2656d 1170 radeon_vm_size = 4;
c1c44132
CK
1171 }
1172
20b2656d 1173 if (radeon_vm_size < 1) {
13c240ef 1174 dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
c1c44132 1175 radeon_vm_size);
20b2656d 1176 radeon_vm_size = 4;
c1c44132
CK
1177 }
1178
3cf8bb1a
JG
1179 /*
1180 * Max GPUVM size for Cayman, SI and CI are 40 bits.
1181 */
20b2656d
CK
1182 if (radeon_vm_size > 1024) {
1183 dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
c1c44132 1184 radeon_vm_size);
20b2656d 1185 radeon_vm_size = 4;
c1c44132 1186 }
4510fb98
CK
1187
1188 /* defines number of bits in page table versus page directory,
1189 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1190 * page table and the remaining bits are in the page directory */
dfc230f9
CK
1191 if (radeon_vm_block_size == -1) {
1192
1193 /* Total bits covered by PD + PTs */
8e66e134 1194 unsigned bits = ilog2(radeon_vm_size) + 18;
dfc230f9
CK
1195
1196 /* Make sure the PD is 4K in size up to 8GB address space.
1197 Above that split equal between PD and PTs */
1198 if (radeon_vm_size <= 8)
1199 radeon_vm_block_size = bits - 9;
1200 else
1201 radeon_vm_block_size = (bits + 3) / 2;
1202
1203 } else if (radeon_vm_block_size < 9) {
20b2656d 1204 dev_warn(rdev->dev, "VM page table size (%d) too small\n",
4510fb98
CK
1205 radeon_vm_block_size);
1206 radeon_vm_block_size = 9;
1207 }
1208
1209 if (radeon_vm_block_size > 24 ||
20b2656d
CK
1210 (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1211 dev_warn(rdev->dev, "VM page table size (%d) too large\n",
4510fb98
CK
1212 radeon_vm_block_size);
1213 radeon_vm_block_size = 9;
1214 }
36421338
JG
1215}
1216
0c195119
AD
1217/**
1218 * radeon_switcheroo_set_state - set switcheroo state
1219 *
1220 * @pdev: pci dev pointer
8e5de1d8 1221 * @state: vga_switcheroo state
0c195119
AD
1222 *
1223 * Callback for the switcheroo driver. Suspends or resumes the
1224 * asics before or after it is powered up using ACPI methods.
1225 */
6a9ee8af
DA
1226static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1227{
1228 struct drm_device *dev = pci_get_drvdata(pdev);
4807c5a8 1229 struct radeon_device *rdev = dev->dev_private;
10ebc0bc 1230
	/* PX (PowerXpress) platforms handle power-off via runtime PM instead */
90c4cde9 1231 if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
10ebc0bc
DA
1232 return;
1233
6a9ee8af 1234 if (state == VGA_SWITCHEROO_ON) {
d1f9809e
ML
1235 unsigned d3_delay = dev->pdev->d3_delay;
1236
6a9ee8af
DA
1237 printk(KERN_INFO "radeon: switched on\n");
1238 /* don't suspend or resume card normally */
5bcf719b 1239 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
d1f9809e 1240
	/* quirked boards need a longer D3 delay to wake reliably */
4807c5a8 1241 if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
d1f9809e
ML
1242 dev->pdev->d3_delay = 20;
1243
10ebc0bc 1244 radeon_resume_kms(dev, true, true);
d1f9809e
ML
1245
	/* restore the caller-visible delay once resume is done */
1246 dev->pdev->d3_delay = d3_delay;
1247
5bcf719b 1248 dev->switch_power_state = DRM_SWITCH_POWER_ON;
fbf81762 1249 drm_kms_helper_poll_enable(dev);
6a9ee8af
DA
1250 } else {
1251 printk(KERN_INFO "radeon: switched off\n");
fbf81762 1252 drm_kms_helper_poll_disable(dev);
5bcf719b 1253 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
274ad65c 1254 radeon_suspend_kms(dev, true, true, false);
5bcf719b 1255 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
6a9ee8af
DA
1256 }
1257}
1258
0c195119
AD
1259/**
1260 * radeon_switcheroo_can_switch - see if switcheroo state can change
1261 *
1262 * @pdev: pci dev pointer
1263 *
1264 * Callback for the switcheroo driver. Check if the switcheroo
1265 * state can be changed.
1266 * Returns true if the state can be changed, false if not.
1267 */
6a9ee8af
DA
1268static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1269{
1270 struct drm_device *dev = pci_get_drvdata(pdev);
6a9ee8af 1271
fc8fd40e
DV
1272 /*
1273 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1274 * locking inversion with the driver load path. And the access here is
1275 * completely racy anyway. So don't bother with locking for now.
1276 */
1277 return dev->open_count == 0;
6a9ee8af
DA
1278}
1279
26ec685f
TI
/* vga_switcheroo callbacks for hybrid-graphics switching;
 * no reprobe hook is implemented. */
1280static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
1281 .set_gpu_state = radeon_switcheroo_set_state,
1282 .reprobe = NULL,
1283 .can_switch = radeon_switcheroo_can_switch,
1284};
6a9ee8af 1285
0c195119
AD
1286/**
1287 * radeon_device_init - initialize the driver
1288 *
1289 * @rdev: radeon_device pointer
1290 * @ddev: drm dev pointer
1291 * @pdev: pci dev pointer
1292 * @flags: driver flags
1293 *
1294 * Initializes the driver info and hw (all asics).
1295 * Returns 0 for success or an error on failure.
1296 * Called at driver startup.
1297 */
771fe6b9
JG
1298int radeon_device_init(struct radeon_device *rdev,
1299 struct drm_device *ddev,
1300 struct pci_dev *pdev,
1301 uint32_t flags)
1302{
351a52a2 1303 int r, i;
ad49f501 1304 int dma_bits;
10ebc0bc 1305 bool runtime = false;
771fe6b9 1306
771fe6b9 1307 rdev->shutdown = false;
9f022ddf 1308 rdev->dev = &pdev->dev;
771fe6b9
JG
1309 rdev->ddev = ddev;
1310 rdev->pdev = pdev;
1311 rdev->flags = flags;
1312 rdev->family = flags & RADEON_FAMILY_MASK;
1313 rdev->is_atom_bios = false;
1314 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
edcd26e8 1315 rdev->mc.gtt_size = 512 * 1024 * 1024;
733289c2 1316 rdev->accel_working = false;
8b25ed34
AD
1317 /* set up ring ids */
1318 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1319 rdev->ring[i].idx = i;
1320 }
954605ca 1321 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1b5331d9 1322
fe0d36e0
AD
1323 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1324 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1325 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1b5331d9 1326
771fe6b9
JG
1327 /* mutex initialization are all done here so we
1328 * can recall function without having locking issues */
d6999bc7 1329 mutex_init(&rdev->ring_lock);
40bacf16 1330 mutex_init(&rdev->dc_hw_i2c_mutex);
c20dc369 1331 atomic_set(&rdev->ih.lock, 0);
4c788679 1332 mutex_init(&rdev->gem.mutex);
c913e23a 1333 mutex_init(&rdev->pm.mutex);
6759a0a7 1334 mutex_init(&rdev->gpu_clock_mutex);
f61d5b46 1335 mutex_init(&rdev->srbm_mutex);
1c0a4625 1336 mutex_init(&rdev->grbm_idx_mutex);
db7fce39 1337 init_rwsem(&rdev->pm.mclk_lock);
dee53e7f 1338 init_rwsem(&rdev->exclusive_lock);
73a6d3fc 1339 init_waitqueue_head(&rdev->irq.vblank_queue);
341cb9e4
CK
1340 mutex_init(&rdev->mn_lock);
1341 hash_init(rdev->mn_hash);
1b9c3dd0
AD
1342 r = radeon_gem_init(rdev);
1343 if (r)
1344 return r;
529364e0 1345
c1c44132 1346 radeon_check_arguments(rdev);
23d4f1f2 1347 /* Adjust VM size here.
c1c44132 1348 * Max GPUVM size for cayman+ is 40 bits.
23d4f1f2 1349 */
	/* radeon_vm_size is in GB; GB -> number of 4K pages is << 18 */
20b2656d 1350 rdev->vm_manager.max_pfn = radeon_vm_size << 18;
771fe6b9 1351
4aac0473
JG
1352 /* Set asic functions */
1353 r = radeon_asic_init(rdev);
36421338 1354 if (r)
4aac0473 1355 return r;
4aac0473 1356
f95df9ca
AD
1357 /* all of the newer IGP chips have an internal gart
1358 * However some rs4xx report as AGP, so remove that here.
1359 */
1360 if ((rdev->family >= CHIP_RS400) &&
1361 (rdev->flags & RADEON_IS_IGP)) {
1362 rdev->flags &= ~RADEON_IS_AGP;
1363 }
1364
30256a3f 1365 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
b574f251 1366 radeon_agp_disable(rdev);
771fe6b9
JG
1367 }
1368
9ed8b1f9
AD
1369 /* Set the internal MC address mask
1370 * This is the max address of the GPU's
1371 * internal address space.
1372 */
1373 if (rdev->family >= CHIP_CAYMAN)
1374 rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1375 else if (rdev->family >= CHIP_CEDAR)
1376 rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1377 else
1378 rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1379
ad49f501
DA
1380 /* set DMA mask + need_dma32 flags.
1381 * PCIE - can handle 40-bits.
005a83f1 1382 * IGP - can handle 40-bits
ad49f501 1383 * AGP - generally dma32 is safest
005a83f1 1384 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
ad49f501
DA
1385 */
1386 rdev->need_dma32 = false;
1387 if (rdev->flags & RADEON_IS_AGP)
1388 rdev->need_dma32 = true;
005a83f1 1389 if ((rdev->flags & RADEON_IS_PCI) &&
4a2b6662 1390 (rdev->family <= CHIP_RS740))
ad49f501
DA
1391 rdev->need_dma32 = true;
1392
1393 dma_bits = rdev->need_dma32 ? 32 : 40;
1394 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
771fe6b9 1395 if (r) {
	/* 40-bit mask rejected: fall back to 32-bit addressing */
62fff811 1396 rdev->need_dma32 = true;
c52494f6 1397 dma_bits = 32;
771fe6b9
JG
1398 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1399 }
c52494f6
KRW
1400 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1401 if (r) {
1402 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1403 printk(KERN_WARNING "radeon: No coherent DMA available.\n");
1404 }
771fe6b9
JG
1405
1406 /* Registers mapping */
1407 /* TODO: block userspace mapping of io register */
2c385151 1408 spin_lock_init(&rdev->mmio_idx_lock);
fe78118c 1409 spin_lock_init(&rdev->smc_idx_lock);
0a5b7b0b
AD
1410 spin_lock_init(&rdev->pll_idx_lock);
1411 spin_lock_init(&rdev->mc_idx_lock);
1412 spin_lock_init(&rdev->pcie_idx_lock);
1413 spin_lock_init(&rdev->pciep_idx_lock);
1414 spin_lock_init(&rdev->pif_idx_lock);
1415 spin_lock_init(&rdev->cg_idx_lock);
1416 spin_lock_init(&rdev->uvd_idx_lock);
1417 spin_lock_init(&rdev->rcu_idx_lock);
1418 spin_lock_init(&rdev->didt_idx_lock);
1419 spin_lock_init(&rdev->end_idx_lock);
efad86db
AD
	/* register BAR differs by generation: BAR 5 on CIK+, BAR 2 before */
1420 if (rdev->family >= CHIP_BONAIRE) {
1421 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1422 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1423 } else {
1424 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1425 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1426 }
771fe6b9
JG
1427 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1428 if (rdev->rmmio == NULL) {
1429 return -ENOMEM;
1430 }
1431 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1432 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1433
75efdee1
AD
1434 /* doorbell bar mapping */
1435 if (rdev->family >= CHIP_BONAIRE)
1436 radeon_doorbell_init(rdev);
1437
351a52a2
AD
1438 /* io port mapping */
1439 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1440 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1441 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1442 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1443 break;
1444 }
1445 }
1446 if (rdev->rio_mem == NULL)
1447 DRM_ERROR("Unable to find PCI I/O BAR\n");
1448
4807c5a8
AD
1449 if (rdev->flags & RADEON_IS_PX)
1450 radeon_device_handle_px_quirks(rdev);
1451
28d52043 1452 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
93239ea1
DA
1453 /* this will fail for cards that aren't VGA class devices, just
1454 * ignore it */
1455 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
10ebc0bc 1456
bfaddd9f 1457 if (rdev->flags & RADEON_IS_PX)
10ebc0bc
DA
1458 runtime = true;
1459 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
1460 if (runtime)
1461 vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
28d52043 1462
3ce0a23d 1463 r = radeon_init(rdev);
b574f251 1464 if (r)
2e97140d 1465 goto failed;
3ce0a23d 1466
409851f4
JG
1467 r = radeon_gem_debugfs_init(rdev);
1468 if (r) {
1469 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
9843ead0
DA
1470 }
1471
1472 r = radeon_mst_debugfs_init(rdev);
1473 if (r) {
1474 DRM_ERROR("registering mst debugfs failed (%d).\n", r);
409851f4
JG
1475 }
1476
b574f251
JG
1477 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1478 /* Acceleration not working on AGP card try again
1479 * with fallback to PCI or PCIE GART
1480 */
a2d07b74 1481 radeon_asic_reset(rdev);
b574f251
JG
1482 radeon_fini(rdev);
1483 radeon_agp_disable(rdev);
1484 r = radeon_init(rdev);
4aac0473 1485 if (r)
2e97140d 1486 goto failed;
771fe6b9 1487 }
6c7bccea 1488
13a7d299
CK
1489 r = radeon_ib_ring_tests(rdev);
1490 if (r)
1491 DRM_ERROR("ib ring test failed (%d).\n", r);
1492
6dfd1972
JG
1493 /*
1494 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
1495 * after the CP ring have chew one packet at least. Hence here we stop
1496 * and restart DPM after the radeon_ib_ring_tests().
1497 */
1498 if (rdev->pm.dpm_enabled &&
1499 (rdev->pm.pm_method == PM_METHOD_DPM) &&
1500 (rdev->family == CHIP_TURKS) &&
1501 (rdev->flags & RADEON_IS_MOBILITY)) {
1502 mutex_lock(&rdev->pm.mutex);
1503 radeon_dpm_disable(rdev);
1504 radeon_dpm_enable(rdev);
1505 mutex_unlock(&rdev->pm.mutex);
1506 }
1507
60a7e396 1508 if ((radeon_testing & 1)) {
4a1132a0
AD
1509 if (rdev->accel_working)
1510 radeon_test_moves(rdev);
1511 else
1512 DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
ecc0b326 1513 }
60a7e396 1514 if ((radeon_testing & 2)) {
4a1132a0
AD
1515 if (rdev->accel_working)
1516 radeon_test_syncing(rdev);
1517 else
1518 DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
60a7e396 1519 }
771fe6b9 1520 if (radeon_benchmarking) {
4a1132a0
AD
1521 if (rdev->accel_working)
1522 radeon_benchmark(rdev, radeon_benchmarking);
1523 else
1524 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
771fe6b9 1525 }
6cf8a3f5 1526 return 0;
2e97140d
AD
1527
1528failed:
	/* undo the runtime-pm registration done above before bailing out */
1529 if (runtime)
1530 vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1531 return r;
771fe6b9
JG
1532}
1533
4d8bf9ae
CK
1534static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1535
0c195119
AD
1536/**
1537 * radeon_device_fini - tear down the driver
1538 *
1539 * @rdev: radeon_device pointer
1540 *
1541 * Tear down the driver info (all asics).
1542 * Called at driver shutdown. Unwinds radeon_device_init() in
1543 * roughly reverse order.
 */
771fe6b9
JG
1544void radeon_device_fini(struct radeon_device *rdev)
1545{
771fe6b9
JG
1546 DRM_INFO("radeon: finishing device.\n");
1547 rdev->shutdown = true;
90aca4d2
JG
1548 /* evict vram memory */
1549 radeon_bo_evict_vram(rdev);
62a8ea3f 1550 radeon_fini(rdev);
6a9ee8af 1551 vga_switcheroo_unregister_client(rdev->pdev);
2e97140d
AD
1552 if (rdev->flags & RADEON_IS_PX)
1553 vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	/* unregister as a VGA client (undo registration from device_init) */
c1176d6f 1554 vga_client_register(rdev->pdev, NULL, NULL, NULL);
e0a2ca73
AD
1555 if (rdev->rio_mem)
1556 pci_iounmap(rdev->pdev, rdev->rio_mem);
351a52a2 1557 rdev->rio_mem = NULL;
771fe6b9
JG
1558 iounmap(rdev->rmmio);
1559 rdev->rmmio = NULL;
75efdee1
AD
1560 if (rdev->family >= CHIP_BONAIRE)
1561 radeon_doorbell_fini(rdev);
4d8bf9ae 1562 radeon_debugfs_remove_files(rdev);
771fe6b9
JG
1563}
1564
1565
1566/*
1567 * Suspend & resume.
1568 */
0c195119
AD
1569/**
1570 * radeon_suspend_kms - initiate device suspend
1571 *
1572 * @dev: drm dev pointer
1573 * @suspend: power the PCI device down to D3hot when true
1574 * @fbcon: suspend the fbdev console (under console_lock) when true
1575 * @freeze: on >= CEDAR, reset the ASIC instead of powering down —
1576 *          presumably the hibernate path; confirm with callers
1577 *
1578 * Puts the hw in the suspend state (all asics).
1579 * Returns 0 for success or an error on failure.
1580 * Called at driver suspend.
1581 */
274ad65c
JG
1579int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1580 bool fbcon, bool freeze)
1581{
875c1866 1582 struct radeon_device *rdev;
771fe6b9 1583 struct drm_crtc *crtc;
d8dcaa1d 1584 struct drm_connector *connector;
7465280c 1585 int i, r;
771fe6b9 1586
875c1866 1587 if (dev == NULL || dev->dev_private == NULL) {
771fe6b9
JG
1588 return -ENODEV;
1589 }
7473e830 1590
875c1866
DJ
1591 rdev = dev->dev_private;
1592
	/* already powered down by switcheroo: nothing to do */
5bcf719b 1593 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
6a9ee8af 1594 return 0;
d8dcaa1d 1595
86698c20
SF
1596 drm_kms_helper_poll_disable(dev);
1597
6adaed5b 1598 drm_modeset_lock_all(dev);
d8dcaa1d
AD
1599 /* turn off display hw */
1600 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1601 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1602 }
6adaed5b 1603 drm_modeset_unlock_all(dev);
d8dcaa1d 1604
f3cbb17b 1605 /* unpin the front buffers and cursors */
771fe6b9 1606 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
f3cbb17b 1607 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
f4510a27 1608 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
4c788679 1609 struct radeon_bo *robj;
771fe6b9 1610
f3cbb17b
GG
1611 if (radeon_crtc->cursor_bo) {
1612 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1613 r = radeon_bo_reserve(robj, false);
1614 if (r == 0) {
1615 radeon_bo_unpin(robj);
1616 radeon_bo_unreserve(robj);
1617 }
1618 }
1619
771fe6b9
JG
1620 if (rfb == NULL || rfb->obj == NULL) {
1621 continue;
1622 }
7e4d15d9 1623 robj = gem_to_radeon_bo(rfb->obj);
38651674
DA
1624 /* don't unpin kernel fb objects */
1625 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
4c788679 1626 r = radeon_bo_reserve(robj, false);
38651674 1627 if (r == 0) {
4c788679
JG
1628 radeon_bo_unpin(robj);
1629 radeon_bo_unreserve(robj);
1630 }
771fe6b9
JG
1631 }
1632 }
1633 /* evict vram memory */
4c788679 1634 radeon_bo_evict_vram(rdev);
8a47cc9e 1635
771fe6b9 1636 /* wait for gpu to finish processing current batch */
5f8f635e 1637 for (i = 0; i < RADEON_NUM_RINGS; i++) {
37615527 1638 r = radeon_fence_wait_empty(rdev, i);
5f8f635e
JG
1639 if (r) {
1640 /* delay GPU reset to resume */
eb98c709 1641 radeon_fence_driver_force_completion(rdev, i);
5f8f635e
JG
1642 }
1643 }
771fe6b9 1644
f657c2a7
YZ
1645 radeon_save_bios_scratch_regs(rdev);
1646
62a8ea3f 1647 radeon_suspend(rdev);
d4877cf2 1648 radeon_hpd_fini(rdev);
771fe6b9 1649 /* evict remaining vram memory */
4c788679 1650 radeon_bo_evict_vram(rdev);
771fe6b9 1651
10b06122
JG
1652 radeon_agp_suspend(rdev);
1653
771fe6b9 1654 pci_save_state(dev->pdev);
ccaa2c12 1655 if (freeze && rdev->family >= CHIP_CEDAR) {
274ad65c
JG
1656 rdev->asic->asic_reset(rdev, true);
1657 pci_restore_state(dev->pdev);
1658 } else if (suspend) {
771fe6b9
JG
1659 /* Shut down the device */
1660 pci_disable_device(dev->pdev);
1661 pci_set_power_state(dev->pdev, PCI_D3hot);
1662 }
10ebc0bc
DA
1663
1664 if (fbcon) {
1665 console_lock();
1666 radeon_fbdev_set_suspend(rdev, 1);
1667 console_unlock();
1668 }
771fe6b9
JG
1669 return 0;
1670}
1671
0c195119
AD
1672/**
1673 * radeon_resume_kms - initiate device resume
1674 *
1675 * @dev: drm dev pointer
1676 * @resume: re-enable and power up the PCI device (D0) when true
1677 * @fbcon: resume the fbdev console and force a modeset when true
1678 *
1679 * Bring the hw back to operating state (all asics).
1680 * Returns 0 for success or an error on failure.
1681 * Called at driver resume.
1682 */
10ebc0bc 1681int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
771fe6b9 1682{
09bdf591 1683 struct drm_connector *connector;
771fe6b9 1684 struct radeon_device *rdev = dev->dev_private;
f3cbb17b 1685 struct drm_crtc *crtc;
04eb2206 1686 int r;
771fe6b9 1687
	/* switched off via switcheroo: leave the hw alone */
5bcf719b 1688 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
6a9ee8af
DA
1689 return 0;
1690
10ebc0bc
DA
1691 if (fbcon) {
1692 console_lock();
1693 }
7473e830
DA
1694 if (resume) {
1695 pci_set_power_state(dev->pdev, PCI_D0);
1696 pci_restore_state(dev->pdev);
1697 if (pci_enable_device(dev->pdev)) {
10ebc0bc
DA
1698 if (fbcon)
1699 console_unlock();
7473e830
DA
1700 return -1;
1701 }
771fe6b9 1702 }
0ebf1717
DA
1703 /* resume AGP if in use */
1704 radeon_agp_resume(rdev);
62a8ea3f 1705 radeon_resume(rdev);
04eb2206
CK
1706
1707 r = radeon_ib_ring_tests(rdev);
1708 if (r)
1709 DRM_ERROR("ib ring test failed (%d).\n", r);
1710
bc6a6295 1711 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
6c7bccea
AD
1712 /* do dpm late init */
1713 r = radeon_pm_late_init(rdev);
1714 if (r) {
1715 rdev->pm.dpm_enabled = false;
1716 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1717 }
bc6a6295
AD
1718 } else {
1719 /* resume old pm late */
1720 radeon_pm_resume(rdev);
6c7bccea
AD
1721 }
1722
f657c2a7 1723 radeon_restore_bios_scratch_regs(rdev);
09bdf591 1724
f3cbb17b
GG
1725 /* pin cursors */
1726 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1727 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1728
1729 if (radeon_crtc->cursor_bo) {
1730 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1731 r = radeon_bo_reserve(robj, false);
1732 if (r == 0) {
1733 /* Only 27 bit offset for legacy cursor */
1734 r = radeon_bo_pin_restricted(robj,
1735 RADEON_GEM_DOMAIN_VRAM,
1736 ASIC_IS_AVIVO(rdev) ?
1737 0 : 1 << 27,
1738 &radeon_crtc->cursor_addr);
1739 if (r != 0)
1740 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1741 radeon_bo_unreserve(robj);
1742 }
1743 }
1744 }
1745
3fa47d9e
AD
1746 /* init dig PHYs, disp eng pll */
1747 if (rdev->is_atom_bios) {
ac89af1e 1748 radeon_atom_encoder_init(rdev);
f3f1f03e 1749 radeon_atom_disp_eng_pll_init(rdev);
bced76f2
AD
1750 /* turn on the BL */
1751 if (rdev->mode_info.bl_encoder) {
1752 u8 bl_level = radeon_get_backlight_level(rdev,
1753 rdev->mode_info.bl_encoder);
1754 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1755 bl_level);
1756 }
3fa47d9e 1757 }
d4877cf2
AD
1758 /* reset hpd state */
1759 radeon_hpd_init(rdev);
771fe6b9 1760 /* blat the mode back in */
ec9954fc
DA
1761 if (fbcon) {
1762 drm_helper_resume_force_mode(dev);
1763 /* turn on display hw */
6adaed5b 1764 drm_modeset_lock_all(dev);
ec9954fc
DA
1765 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1766 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1767 }
6adaed5b 1768 drm_modeset_unlock_all(dev);
a93f344d 1769 }
86698c20
SF
1770
1771 drm_kms_helper_poll_enable(dev);
18ee37a4 1772
3640da2f
AD
1773 /* set the power state here in case we are a PX system or headless */
1774 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1775 radeon_pm_compute_clocks(rdev);
1776
18ee37a4
DV
1777 if (fbcon) {
1778 radeon_fbdev_set_suspend(rdev, 0);
1779 console_unlock();
1780 }
1781
771fe6b9
JG
1782 return 0;
1783}
1784
0c195119
AD
1785/**
1786 * radeon_gpu_reset - reset the asic
1787 *
1788 * @rdev: radeon device pointer
1789 *
1790 * Attempt the reset the GPU if it has hung (all asics).
1791 * Saves pending ring contents, resets and resumes the ASIC, then
1792 * tries to restore the saved commands.
1793 * Returns 0 for success or an error on failure (-EAGAIN requests
1794 * another reset attempt).
1795 */
90aca4d2
JG
1793int radeon_gpu_reset(struct radeon_device *rdev)
1794{
55d7c221
CK
1795 unsigned ring_sizes[RADEON_NUM_RINGS];
1796 uint32_t *ring_data[RADEON_NUM_RINGS];
1797
1798 bool saved = false;
1799
1800 int i, r;
8fd1b84c 1801 int resched;
90aca4d2 1802
dee53e7f 1803 down_write(&rdev->exclusive_lock);
f9eaf9ae
CK
1804
1805 if (!rdev->needs_reset) {
1806 up_write(&rdev->exclusive_lock);
1807 return 0;
1808 }
1809
72b9076b
MO
1810 atomic_inc(&rdev->gpu_reset_counter);
1811
90aca4d2 1812 radeon_save_bios_scratch_regs(rdev);
8fd1b84c
DA
1813 /* block TTM */
1814 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
90aca4d2 1815 radeon_suspend(rdev);
73ef0e0d 1816 radeon_hpd_fini(rdev);
90aca4d2 1817
	/* snapshot unprocessed commands so they can be replayed after reset */
55d7c221
CK
1818 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1819 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1820 &ring_data[i]);
1821 if (ring_sizes[i]) {
1822 saved = true;
1823 dev_info(rdev->dev, "Saved %d dwords of commands "
1824 "on ring %d.\n", ring_sizes[i], i);
1825 }
1826 }
1827
90aca4d2
JG
1828 r = radeon_asic_reset(rdev);
1829 if (!r) {
55d7c221 1830 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
90aca4d2 1831 radeon_resume(rdev);
55d7c221 1832 }
04eb2206 1833
55d7c221 1834 radeon_restore_bios_scratch_regs(rdev);
04eb2206 1835
9bb39ff4
ML
1836 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1837 if (!r && ring_data[i]) {
55d7c221
CK
1838 radeon_ring_restore(rdev, &rdev->ring[i],
1839 ring_sizes[i], ring_data[i]);
9bb39ff4 1840 } else {
	/* reset failed (or nothing saved): unblock fence waiters */
eb98c709 1841 radeon_fence_driver_force_completion(rdev, i);
55d7c221
CK
1842 kfree(ring_data[i]);
1843 }
90aca4d2 1844 }
7a1619b9 1845
c940b447
AD
1846 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1847 /* do dpm late init */
1848 r = radeon_pm_late_init(rdev);
1849 if (r) {
1850 rdev->pm.dpm_enabled = false;
1851 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1852 }
1853 } else {
1854 /* resume old pm late */
1855 radeon_pm_resume(rdev);
1856 }
1857
73ef0e0d
AD
1858 /* init dig PHYs, disp eng pll */
1859 if (rdev->is_atom_bios) {
1860 radeon_atom_encoder_init(rdev);
1861 radeon_atom_disp_eng_pll_init(rdev);
1862 /* turn on the BL */
1863 if (rdev->mode_info.bl_encoder) {
1864 u8 bl_level = radeon_get_backlight_level(rdev,
1865 rdev->mode_info.bl_encoder);
1866 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1867 bl_level);
1868 }
1869 }
1870 /* reset hpd state */
1871 radeon_hpd_init(rdev);
1872
9bb39ff4 1873 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
3c036389
CK
1874
1875 rdev->in_reset = true;
1876 rdev->needs_reset = false;
1877
	/* drop to a read lock (released with up_read() below) */
9bb39ff4
ML
1878 downgrade_write(&rdev->exclusive_lock);
1879
d3493574
JG
1880 drm_helper_resume_force_mode(rdev->ddev);
1881
c940b447
AD
1882 /* set the power state here in case we are a PX system or headless */
1883 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1884 radeon_pm_compute_clocks(rdev);
1885
9bb39ff4
ML
1886 if (!r) {
1887 r = radeon_ib_ring_tests(rdev);
1888 if (r && saved)
1889 r = -EAGAIN;
1890 } else {
7a1619b9
MD
1891 /* bad news, how to tell it to userspace ? */
1892 dev_info(rdev->dev, "GPU reset failed\n");
1893 }
1894
9bb39ff4
ML
1895 rdev->needs_reset = r == -EAGAIN;
1896 rdev->in_reset = false;
1897
1898 up_read(&rdev->exclusive_lock);
90aca4d2
JG
1899 return r;
1900}
1901
771fe6b9
JG
1902
1903/*
1904 * Debugfs
1905 */
771fe6b9
JG
/**
 * radeon_debugfs_add_files - register a set of debugfs files
 *
 * @rdev: radeon_device pointer
 * @files: array of debugfs file descriptions
 * @nfiles: number of entries in @files
 *
 * Records @files in rdev->debugfs[] (duplicate registrations are
 * silently accepted) and, when debugfs is built in, creates the files
 * on both the control and primary DRM minors.
 * Returns 0 on success, -EINVAL when the component table is full.
 */
1906int radeon_debugfs_add_files(struct radeon_device *rdev,
1907 struct drm_info_list *files,
1908 unsigned nfiles)
1909{
1910 unsigned i;
1911
4d8bf9ae
CK
1912 for (i = 0; i < rdev->debugfs_count; i++) {
1913 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1914 /* Already registered */
1915 return 0;
1916 }
1917 }
c245cb9e 1918
	/* i is the new component count once this set is added */
4d8bf9ae 1919 i = rdev->debugfs_count + 1;
c245cb9e
MW
1920 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1921 DRM_ERROR("Reached maximum number of debugfs components.\n");
1922 DRM_ERROR("Report so we increase "
3cf8bb1a 1923 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1924 return -EINVAL;
1925 }
4d8bf9ae
CK
1926 rdev->debugfs[rdev->debugfs_count].files = files;
1927 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1928 rdev->debugfs_count = i;
771fe6b9
JG
1929#if defined(CONFIG_DEBUG_FS)
1930 drm_debugfs_create_files(files, nfiles,
1931 rdev->ddev->control->debugfs_root,
1932 rdev->ddev->control);
1933 drm_debugfs_create_files(files, nfiles,
1934 rdev->ddev->primary->debugfs_root,
1935 rdev->ddev->primary);
1936#endif
1937 return 0;
1938}
1939
4d8bf9ae
CK
/**
 * radeon_debugfs_remove_files - unregister all tracked debugfs files
 *
 * @rdev: radeon_device pointer
 *
 * Removes every file set previously recorded by
 * radeon_debugfs_add_files() from both DRM minors. No-op when
 * debugfs is not built in.
 */
1940static void radeon_debugfs_remove_files(struct radeon_device *rdev)
1941{
1942#if defined(CONFIG_DEBUG_FS)
1943 unsigned i;
1944
1945 for (i = 0; i < rdev->debugfs_count; i++) {
1946 drm_debugfs_remove_files(rdev->debugfs[i].files,
1947 rdev->debugfs[i].num_files,
1948 rdev->ddev->control);
1949 drm_debugfs_remove_files(rdev->debugfs[i].files,
1950 rdev->debugfs[i].num_files,
1951 rdev->ddev->primary);
1952 }
1953#endif
1954}
1955
771fe6b9
JG
1956#if defined(CONFIG_DEBUG_FS)
/* Stub: per-device debugfs files are registered on demand through
 * radeon_debugfs_add_files(), so there is nothing to do per-minor. */
1957int radeon_debugfs_init(struct drm_minor *minor)
1958{
1959 return 0;
1960}
1961
/* Stub: removal is handled by radeon_debugfs_remove_files(). */
1962void radeon_debugfs_cleanup(struct drm_minor *minor)
1963{
771fe6b9
JG
1964}
1965#endif