]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/gpu/drm/radeon/radeon_device.c
drm: kill drm_bus->bus_type
[mirror_ubuntu-zesty-kernel.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
/* Human-readable ASIC names. Indexed by enum radeon_family, so the order
 * here must stay in sync with that enum (see radeon_family.h); do not
 * reorder or insert entries. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"LAST",
};
104
90c4cde9
AD
105bool radeon_is_px(struct drm_device *dev)
106{
107 struct radeon_device *rdev = dev->dev_private;
108
109 if (rdev->flags & RADEON_IS_PX)
110 return true;
111 return false;
112}
10ebc0bc 113
2e1b65f9
AD
114/**
115 * radeon_program_register_sequence - program an array of registers.
116 *
117 * @rdev: radeon_device pointer
118 * @registers: pointer to the register array
119 * @array_size: size of the register array
120 *
121 * Programs an array or registers with and and or masks.
122 * This is a helper for setting golden registers.
123 */
124void radeon_program_register_sequence(struct radeon_device *rdev,
125 const u32 *registers,
126 const u32 array_size)
127{
128 u32 tmp, reg, and_mask, or_mask;
129 int i;
130
131 if (array_size % 3)
132 return;
133
134 for (i = 0; i < array_size; i +=3) {
135 reg = registers[i + 0];
136 and_mask = registers[i + 1];
137 or_mask = registers[i + 2];
138
139 if (and_mask == 0xffffffff) {
140 tmp = or_mask;
141 } else {
142 tmp = RREG32(reg);
143 tmp &= ~and_mask;
144 tmp |= or_mask;
145 }
146 WREG32(reg, tmp);
147 }
148}
149
1a0041b8
AD
/* Reset the ASIC by writing the reset magic value to PCI config
 * register offset 0x7c. */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
154
0c195119
AD
155/**
156 * radeon_surface_init - Clear GPU surface registers.
157 *
158 * @rdev: radeon_device pointer
159 *
160 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 161 */
3ce0a23d 162void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
163{
164 /* FIXME: check this out */
165 if (rdev->family < CHIP_R600) {
166 int i;
167
550e2d92
DA
168 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
169 if (rdev->surface_regs[i].bo)
170 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
171 else
172 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 173 }
e024e110
DA
174 /* enable surfaces */
175 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
176 }
177}
178
771fe6b9
JG
179/*
180 * GPU scratch registers helpers function.
181 */
0c195119
AD
182/**
183 * radeon_scratch_init - Init scratch register driver information.
184 *
185 * @rdev: radeon_device pointer
186 *
187 * Init CP scratch register driver information (r1xx-r5xx)
188 */
3ce0a23d 189void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
190{
191 int i;
192
193 /* FIXME: check this out */
194 if (rdev->family < CHIP_R300) {
195 rdev->scratch.num_reg = 5;
196 } else {
197 rdev->scratch.num_reg = 7;
198 }
724c80e1 199 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
200 for (i = 0; i < rdev->scratch.num_reg; i++) {
201 rdev->scratch.free[i] = true;
724c80e1 202 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
203 }
204}
205
0c195119
AD
206/**
207 * radeon_scratch_get - Allocate a scratch register
208 *
209 * @rdev: radeon_device pointer
210 * @reg: scratch register mmio offset
211 *
212 * Allocate a CP scratch register for use by the driver (all asics).
213 * Returns 0 on success or -EINVAL on failure.
214 */
771fe6b9
JG
215int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
216{
217 int i;
218
219 for (i = 0; i < rdev->scratch.num_reg; i++) {
220 if (rdev->scratch.free[i]) {
221 rdev->scratch.free[i] = false;
222 *reg = rdev->scratch.reg[i];
223 return 0;
224 }
225 }
226 return -EINVAL;
227}
228
0c195119
AD
229/**
230 * radeon_scratch_free - Free a scratch register
231 *
232 * @rdev: radeon_device pointer
233 * @reg: scratch register mmio offset
234 *
235 * Free a CP scratch register allocated for use by the driver (all asics)
236 */
771fe6b9
JG
237void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
238{
239 int i;
240
241 for (i = 0; i < rdev->scratch.num_reg; i++) {
242 if (rdev->scratch.reg[i] == reg) {
243 rdev->scratch.free[i] = true;
244 return;
245 }
246 }
247}
248
75efdee1
AD
249/*
250 * GPU doorbell aperture helpers function.
251 */
252/**
253 * radeon_doorbell_init - Init doorbell driver information.
254 *
255 * @rdev: radeon_device pointer
256 *
257 * Init doorbell driver information (CIK)
258 * Returns 0 on success, error on failure.
259 */
28f5a6cd 260static int radeon_doorbell_init(struct radeon_device *rdev)
75efdee1 261{
75efdee1
AD
262 /* doorbell bar mapping */
263 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
264 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
265
d5754ab8
AL
266 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
267 if (rdev->doorbell.num_doorbells == 0)
268 return -EINVAL;
75efdee1 269
d5754ab8 270 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
75efdee1
AD
271 if (rdev->doorbell.ptr == NULL) {
272 return -ENOMEM;
273 }
274 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
275 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
276
d5754ab8 277 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
75efdee1 278
75efdee1
AD
279 return 0;
280}
281
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	/* unmap the doorbell BAR and drop the stale pointer */
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
294
295/**
d5754ab8 296 * radeon_doorbell_get - Allocate a doorbell entry
75efdee1
AD
297 *
298 * @rdev: radeon_device pointer
d5754ab8 299 * @doorbell: doorbell index
75efdee1 300 *
d5754ab8 301 * Allocate a doorbell for use by the driver (all asics).
75efdee1
AD
302 * Returns 0 on success or -EINVAL on failure.
303 */
304int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
305{
d5754ab8
AL
306 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
307 if (offset < rdev->doorbell.num_doorbells) {
308 __set_bit(offset, rdev->doorbell.used);
309 *doorbell = offset;
310 return 0;
311 } else {
312 return -EINVAL;
75efdee1 313 }
75efdee1
AD
314}
315
/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics)
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	/* ignore out-of-range indices; otherwise mark the slot free again */
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}
329
0c195119
AD
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
348
0c195119
AD
349/**
350 * radeon_wb_fini - Disable Writeback and free memory
351 *
352 * @rdev: radeon_device pointer
353 *
354 * Disables Writeback and frees the Writeback memory (all asics).
355 * Used at driver shutdown.
356 */
724c80e1
AD
357void radeon_wb_fini(struct radeon_device *rdev)
358{
359 radeon_wb_disable(rdev);
360 if (rdev->wb.wb_obj) {
089920f2
JG
361 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
362 radeon_bo_kunmap(rdev->wb.wb_obj);
363 radeon_bo_unpin(rdev->wb.wb_obj);
364 radeon_bo_unreserve(rdev->wb.wb_obj);
365 }
724c80e1
AD
366 radeon_bo_unref(&rdev->wb.wb_obj);
367 rdev->wb.wb = NULL;
368 rdev->wb.wb_obj = NULL;
369 }
370}
371
0c195119
AD
372/**
373 * radeon_wb_init- Init Writeback driver info and allocate memory
374 *
375 * @rdev: radeon_device pointer
376 *
377 * Disables Writeback and frees the Writeback memory (all asics).
378 * Used at driver startup.
379 * Returns 0 on success or an -error on failure.
380 */
724c80e1
AD
381int radeon_wb_init(struct radeon_device *rdev)
382{
383 int r;
384
385 if (rdev->wb.wb_obj == NULL) {
441921d5 386 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
40f5cf99 387 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
724c80e1
AD
388 if (r) {
389 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
390 return r;
391 }
089920f2
JG
392 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
393 if (unlikely(r != 0)) {
394 radeon_wb_fini(rdev);
395 return r;
396 }
397 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
398 &rdev->wb.gpu_addr);
399 if (r) {
400 radeon_bo_unreserve(rdev->wb.wb_obj);
401 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
402 radeon_wb_fini(rdev);
403 return r;
404 }
405 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
724c80e1 406 radeon_bo_unreserve(rdev->wb.wb_obj);
089920f2
JG
407 if (r) {
408 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
409 radeon_wb_fini(rdev);
410 return r;
411 }
724c80e1
AD
412 }
413
e6ba7599
AD
414 /* clear wb memory */
415 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
d0f8a854
AD
416 /* disable event_write fences */
417 rdev->wb.use_event = false;
724c80e1 418 /* disabled via module param */
3b7a2b24 419 if (radeon_no_wb == 1) {
724c80e1 420 rdev->wb.enabled = false;
3b7a2b24 421 } else {
724c80e1 422 if (rdev->flags & RADEON_IS_AGP) {
28eebb70
AD
423 /* often unreliable on AGP */
424 rdev->wb.enabled = false;
425 } else if (rdev->family < CHIP_R300) {
426 /* often unreliable on pre-r300 */
724c80e1 427 rdev->wb.enabled = false;
d0f8a854 428 } else {
724c80e1 429 rdev->wb.enabled = true;
d0f8a854 430 /* event_write fences are only available on r600+ */
3b7a2b24 431 if (rdev->family >= CHIP_R600) {
d0f8a854 432 rdev->wb.use_event = true;
3b7a2b24 433 }
d0f8a854 434 }
724c80e1 435 }
c994ead6
AD
436 /* always use writeback/events on NI, APUs */
437 if (rdev->family >= CHIP_PALM) {
7d52785d
AD
438 rdev->wb.enabled = true;
439 rdev->wb.use_event = true;
440 }
724c80e1
AD
441
442 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
443
444 return 0;
445}
446
d594e46a
JG
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM than we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* user-requested cap from the vramlimit module param, in bytes */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		/* VRAM does not fit in the MC address space above base */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		/* VRAM range overlaps the AGP GTT range */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute end in case mc_vram_size was clamped above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 511
d594e46a
JG
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space after VRAM (aligned up) and before VRAM (aligned down) */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		/* more room below VRAM: place GTT there, shrinking if needed */
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		/* otherwise place GTT just above VRAM */
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
547
771fe6b9
JG
548/*
549 * GPU helpers function.
550 */
0c195119
AD
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* no display engine: CRTC checks don't apply, go straight to memsize */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
614
0c195119
AD
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz: divide the raw clocks (in 10 kHz units) by 100 */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
642
0c195119
AD
643/**
644 * radeon_boot_test_post_card - check and possibly initialize the hw
645 *
646 * @rdev: radeon_device pointer
647 *
648 * Check if the asic is initialized and if not, attempt to initialize
649 * it (all asics).
650 * Returns true if initialized or false if not.
651 */
72542d77
DA
652bool radeon_boot_test_post_card(struct radeon_device *rdev)
653{
654 if (radeon_card_posted(rdev))
655 return true;
656
657 if (rdev->bios) {
658 DRM_INFO("GPU not posted. posting now...\n");
659 if (rdev->is_atom_bios)
660 atom_asic_init(rdev->mode_info.atom_context);
661 else
662 radeon_combios_asic_init(rdev->ddev);
663 return true;
664 } else {
665 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
666 return false;
667 }
668}
669
0c195119
AD
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated: nothing to do */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
697
0c195119
AD
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	/* undo the DMA mapping before releasing the page itself */
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
714
771fe6b9 715
771fe6b9 716/* ATOM accessor methods */
0c195119
AD
717/*
718 * ATOM is an interpreted byte code stored in tables in the vbios. The
719 * driver registers callbacks to access registers and the interpreter
720 * in the driver parses the tables and executes then to program specific
721 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
722 * atombios.h, and atom.c
723 */
724
725/**
726 * cail_pll_read - read PLL register
727 *
728 * @info: atom card_info pointer
729 * @reg: PLL register offset
730 *
731 * Provides a PLL register accessor for the atom interpreter (r4xx+).
732 * Returns the value of the PLL register.
733 */
771fe6b9
JG
734static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
735{
736 struct radeon_device *rdev = info->dev->dev_private;
737 uint32_t r;
738
739 r = rdev->pll_rreg(rdev, reg);
740 return r;
741}
742
0c195119
AD
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
758
0c195119
AD
759/**
760 * cail_mc_read - read MC (Memory Controller) register
761 *
762 * @info: atom card_info pointer
763 * @reg: MC register offset
764 *
765 * Provides an MC register accessor for the atom interpreter (r4xx+).
766 * Returns the value of the MC register.
767 */
771fe6b9
JG
768static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
769{
770 struct radeon_device *rdev = info->dev->dev_private;
771 uint32_t r;
772
773 r = rdev->mc_rreg(rdev, reg);
774 return r;
775}
776
0c195119
AD
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
792
0c195119
AD
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* atom addresses registers in dwords, MMIO in bytes */
	WREG32(reg*4, val);
}
808
0c195119
AD
809/**
810 * cail_reg_read - read MMIO register
811 *
812 * @info: atom card_info pointer
813 * @reg: MMIO register offset
814 *
815 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
816 * Returns the value of the MMIO register.
817 */
771fe6b9
JG
818static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
819{
820 struct radeon_device *rdev = info->dev->dev_private;
821 uint32_t r;
822
823 r = RREG32(reg*4);
824 return r;
825}
826
0c195119
AD
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* atom addresses registers in dwords, IO space in bytes */
	WREG32_IO(reg*4, val);
}
842
0c195119
AD
843/**
844 * cail_ioreg_read - read IO register
845 *
846 * @info: atom card_info pointer
847 * @reg: IO register offset
848 *
849 * Provides an IO register accessor for the atom interpreter (r4xx+).
850 * Returns the value of the IO register.
851 */
351a52a2
AD
852static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
853{
854 struct radeon_device *rdev = info->dev->dev_private;
855 uint32_t r;
856
857 r = RREG32_IO(reg*4);
858 return r;
859}
860
0c195119
AD
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no IO BAR: fall back to MMIO accessors for the iio ops */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
908
0c195119
AD
/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	/* free the fb scratch area before the context that owns it */
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}
928
0c195119
AD
/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
950
0c195119
AD
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* combios init allocates nothing, so there is nothing to free */
}
962
0c195119
AD
963/* if we get transitioned to only one device, take VGA back */
964/**
965 * radeon_vga_set_decode - enable/disable vga decode
966 *
967 * @cookie: radeon_device pointer
968 * @state: enable/disable vga decode
969 *
970 * Enable/disable vga decode (all asics).
971 * Returns VGA resource flags.
972 */
28d52043
DA
973static unsigned int radeon_vga_set_decode(void *cookie, bool state)
974{
975 struct radeon_device *rdev = cookie;
28d52043
DA
976 radeon_vga_set_state(rdev, state);
977 if (state)
978 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
979 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
980 else
981 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
982}
c1176d6f 983
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	/* clearing the lowest set bit leaves zero iff at most one bit is set */
	return !(arg & (arg - 1));
}
996
0c195119
AD
997/**
998 * radeon_check_arguments - validate module params
999 *
1000 * @rdev: radeon_device pointer
1001 *
1002 * Validates certain module parameters and updates
1003 * the associated values used by the driver (all asics).
1004 */
1109ca09 1005static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
1006{
1007 /* vramlimit must be a power of two */
1bcb04f7 1008 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
1009 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1010 radeon_vram_limit);
1011 radeon_vram_limit = 0;
36421338 1012 }
1bcb04f7 1013
edcd26e8
AD
1014 if (radeon_gart_size == -1) {
1015 /* default to a larger gart size on newer asics */
1016 if (rdev->family >= CHIP_RV770)
1017 radeon_gart_size = 1024;
1018 else
1019 radeon_gart_size = 512;
1020 }
36421338 1021 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 1022 if (radeon_gart_size < 32) {
edcd26e8 1023 dev_warn(rdev->dev, "gart size (%d) too small\n",
36421338 1024 radeon_gart_size);
edcd26e8
AD
1025 if (rdev->family >= CHIP_RV770)
1026 radeon_gart_size = 1024;
1027 else
1028 radeon_gart_size = 512;
1bcb04f7 1029 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
1030 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1031 radeon_gart_size);
edcd26e8
AD
1032 if (rdev->family >= CHIP_RV770)
1033 radeon_gart_size = 1024;
1034 else
1035 radeon_gart_size = 512;
36421338 1036 }
1bcb04f7
CK
1037 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1038
36421338
JG
1039 /* AGP mode can only be -1, 1, 2, 4, 8 */
1040 switch (radeon_agpmode) {
1041 case -1:
1042 case 0:
1043 case 1:
1044 case 2:
1045 case 4:
1046 case 8:
1047 break;
1048 default:
1049 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1050 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1051 radeon_agpmode = 0;
1052 break;
1053 }
1054}
1055
d1f9809e
ML
1056/**
1057 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
1058 * needed for waking up.
1059 *
1060 * @pdev: pci dev pointer
1061 */
1062static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1063{
1064
1065 /* 6600m in a macbook pro */
1066 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1067 pdev->subsystem_device == 0x00e2) {
1068 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
1069 return true;
1070 }
1071
1072 return false;
1073}
1074
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* PX devices ignore manual OFF requests here -- presumably power is
	 * managed elsewhere (runtime PM); NOTE(review): confirm against the
	 * runtime-pm paths */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		/* remember the stock delay so it can be restored below */
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* bump the D3 wakeup delay for quirked boards */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		/* restore the original delay once resume is done */
		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before powering the asic down */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1115
0c195119
AD
1116/**
1117 * radeon_switcheroo_can_switch - see if switcheroo state can change
1118 *
1119 * @pdev: pci dev pointer
1120 *
1121 * Callback for the switcheroo driver. Check of the switcheroo
1122 * state can be changed.
1123 * Returns true if the state can be changed, false if not.
1124 */
6a9ee8af
DA
1125static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1126{
1127 struct drm_device *dev = pci_get_drvdata(pdev);
1128 bool can_switch;
1129
1130 spin_lock(&dev->count_lock);
1131 can_switch = (dev->open_count == 0);
1132 spin_unlock(&dev->count_lock);
1133 return can_switch;
1134}
1135
/* vga_switcheroo client callbacks for this driver; no reprobe hook
 * is provided, so .reprobe stays NULL */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1141
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	/* basic software state; family is packed into the driver flags */
	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* module params are validated only after the family is known */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to a 32-bit mask if the 40-bit one is rejected */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* bonaire and newer expose the register aperture in BAR 5,
	 * older asics in BAR 2 */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: use the first IO resource found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	/* PX (hybrid graphics) parts register as runtime-pm switcheroo
	 * clients */
	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}

	/* radeon_testing bit 0 selects BO move tests */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	/* radeon_testing bit 1 selects ring synchronization tests */
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}
1358
4d8bf9ae
CK
1359static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1360
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* drop the switcheroo and vga-arbiter registrations made in
	 * radeon_device_init() */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	/* unmap the register aperture and clear the pointer so late
	 * accesses fault loudly instead of touching stale mappings */
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1387
1388
1389/*
1390 * Suspend & resume.
1391 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to also power down the PCI device
 * @fbcon: true to suspend the fbdev console as well
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* already powered down via switcheroo -- nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	/* console_lock serializes against fbcon while flipping the
	 * fbdev suspend state */
	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1482
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to also re-enable the PCI device
 * @fbcon: true to resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* powered off via switcheroo -- nothing to resume here */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			/* dpm init failure is non-fatal: fall back to no dpm */
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1566
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* per-ring backup of unprocessed commands, replayed after reset */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* exclusive_lock keeps all other command submission out while
	 * the asic is being reset */
	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	rdev->needs_reset = false;

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* radeon_ring_restore() consumes the backup buffers, so
		 * clear the local copies to avoid replaying them twice on
		 * a retry */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* retry once more, this time without
				 * replaying the saved commands */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: complete outstanding fences and drop the
		 * backed-up commands */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	radeon_pm_resume(rdev);
	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1655
771fe6b9
JG
1656
1657/*
1658 * Debugfs
1659 */
771fe6b9
JG
1660int radeon_debugfs_add_files(struct radeon_device *rdev,
1661 struct drm_info_list *files,
1662 unsigned nfiles)
1663{
1664 unsigned i;
1665
4d8bf9ae
CK
1666 for (i = 0; i < rdev->debugfs_count; i++) {
1667 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1668 /* Already registered */
1669 return 0;
1670 }
1671 }
c245cb9e 1672
4d8bf9ae 1673 i = rdev->debugfs_count + 1;
c245cb9e
MW
1674 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1675 DRM_ERROR("Reached maximum number of debugfs components.\n");
1676 DRM_ERROR("Report so we increase "
1677 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1678 return -EINVAL;
1679 }
4d8bf9ae
CK
1680 rdev->debugfs[rdev->debugfs_count].files = files;
1681 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1682 rdev->debugfs_count = i;
771fe6b9
JG
1683#if defined(CONFIG_DEBUG_FS)
1684 drm_debugfs_create_files(files, nfiles,
1685 rdev->ddev->control->debugfs_root,
1686 rdev->ddev->control);
1687 drm_debugfs_create_files(files, nfiles,
1688 rdev->ddev->primary->debugfs_root,
1689 rdev->ddev->primary);
1690#endif
1691 return 0;
1692}
1693
/* Unregister every debugfs file list recorded by
 * radeon_debugfs_add_files(), from both the control and primary drm
 * minors; a no-op when debugfs is not built in. */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1709
#if defined(CONFIG_DEBUG_FS)
/* drm minor debugfs init hook; intentionally empty -- the driver adds
 * its files later through radeon_debugfs_add_files() */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* drm minor debugfs cleanup hook; intentionally empty -- files are
 * torn down in radeon_debugfs_remove_files() */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif