]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/radeon/radeon_device.c
Merge remote-tracking branch 'asoc/fix/si476x' into asoc-next
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
/* Human-readable names for each chip family, indexed by rdev->family
 * (see the informational printout in radeon_device_init).
 * NOTE(review): order is assumed to match enum radeon_family in
 * radeon_family.h — confirm when adding entries. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"LAST",
};
99
0c195119
AD
100/**
101 * radeon_surface_init - Clear GPU surface registers.
102 *
103 * @rdev: radeon_device pointer
104 *
105 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 106 */
3ce0a23d 107void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
108{
109 /* FIXME: check this out */
110 if (rdev->family < CHIP_R600) {
111 int i;
112
550e2d92
DA
113 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
114 if (rdev->surface_regs[i].bo)
115 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
116 else
117 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 118 }
e024e110
DA
119 /* enable surfaces */
120 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
121 }
122}
123
771fe6b9
JG
124/*
 125 * GPU scratch registers helper functions.
126 */
0c195119
AD
127/**
128 * radeon_scratch_init - Init scratch register driver information.
129 *
130 * @rdev: radeon_device pointer
131 *
132 * Init CP scratch register driver information (r1xx-r5xx)
133 */
3ce0a23d 134void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
135{
136 int i;
137
138 /* FIXME: check this out */
139 if (rdev->family < CHIP_R300) {
140 rdev->scratch.num_reg = 5;
141 } else {
142 rdev->scratch.num_reg = 7;
143 }
724c80e1 144 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
145 for (i = 0; i < rdev->scratch.num_reg; i++) {
146 rdev->scratch.free[i] = true;
724c80e1 147 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
148 }
149}
150
0c195119
AD
151/**
152 * radeon_scratch_get - Allocate a scratch register
153 *
154 * @rdev: radeon_device pointer
155 * @reg: scratch register mmio offset
156 *
157 * Allocate a CP scratch register for use by the driver (all asics).
158 * Returns 0 on success or -EINVAL on failure.
159 */
771fe6b9
JG
160int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
161{
162 int i;
163
164 for (i = 0; i < rdev->scratch.num_reg; i++) {
165 if (rdev->scratch.free[i]) {
166 rdev->scratch.free[i] = false;
167 *reg = rdev->scratch.reg[i];
168 return 0;
169 }
170 }
171 return -EINVAL;
172}
173
0c195119
AD
174/**
175 * radeon_scratch_free - Free a scratch register
176 *
177 * @rdev: radeon_device pointer
178 * @reg: scratch register mmio offset
179 *
180 * Free a CP scratch register allocated for use by the driver (all asics)
181 */
771fe6b9
JG
182void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
183{
184 int i;
185
186 for (i = 0; i < rdev->scratch.num_reg; i++) {
187 if (rdev->scratch.reg[i] == reg) {
188 rdev->scratch.free[i] = true;
189 return;
190 }
191 }
192}
193
0c195119
AD
194/*
195 * radeon_wb_*()
 196 * Writeback is the method by which the GPU updates special pages
197 * in memory with the status of certain GPU events (fences, ring pointers,
198 * etc.).
199 */
200
201/**
202 * radeon_wb_disable - Disable Writeback
203 *
204 * @rdev: radeon_device pointer
205 *
206 * Disables Writeback (all asics). Used for suspend.
207 */
724c80e1
AD
208void radeon_wb_disable(struct radeon_device *rdev)
209{
210 int r;
211
212 if (rdev->wb.wb_obj) {
213 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
214 if (unlikely(r != 0))
215 return;
216 radeon_bo_kunmap(rdev->wb.wb_obj);
217 radeon_bo_unpin(rdev->wb.wb_obj);
218 radeon_bo_unreserve(rdev->wb.wb_obj);
219 }
220 rdev->wb.enabled = false;
221}
222
0c195119
AD
223/**
224 * radeon_wb_fini - Disable Writeback and free memory
225 *
226 * @rdev: radeon_device pointer
227 *
228 * Disables Writeback and frees the Writeback memory (all asics).
229 * Used at driver shutdown.
230 */
724c80e1
AD
231void radeon_wb_fini(struct radeon_device *rdev)
232{
233 radeon_wb_disable(rdev);
234 if (rdev->wb.wb_obj) {
235 radeon_bo_unref(&rdev->wb.wb_obj);
236 rdev->wb.wb = NULL;
237 rdev->wb.wb_obj = NULL;
238 }
239}
240
0c195119
AD
/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Allocates, pins and maps the Writeback page, then decides whether
 * writeback/events may actually be used on this asic (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* allocate the BO only once; it survives radeon_wb_disable() so
	 * suspend/resume re-enters here with wb_obj already set */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		/* drop the reservation before tearing everything down */
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
315
d594e46a
JG
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the unvisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by the bogus hw of Novell bug 204882 + along with lots
 * of ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* radeon_vram_limit module param is in MB */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		/* VRAM does not fit below 4GB from base: clamp to aperture */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		/* VRAM range would overlap the AGP GTT window: clamp too */
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute end: mc_vram_size may have been clamped above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 380
d594e46a
JG
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* aligned space available after (af) and before (bf) VRAM */
	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		/* more room below VRAM: place GTT there, shrinking if needed */
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		/* otherwise place GTT just above VRAM, aligned up */
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
416
771fe6b9
JG
417/*
418 * GPU helpers function.
419 */
0c195119
AD
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* Apple hardware booted via EFI is always treated as unposted,
	 * forcing a re-post — NOTE(review): presumably the EFI GOP driver
	 * leaves the asic in an unusable state; confirm */
	if (efi_enabled(EFI_BOOT) &&
	    rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
		return false;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		/* DCE4.1: only the first two CRTCs are checked */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		/* other DCE4 parts: check all six CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		/* legacy (pre-avivo) CRTC enable bits */
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
478
0c195119
AD
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz: the raw values are divided by 100, so they are
	 * presumably stored in 10 kHz units — TODO confirm */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16
		 * NOTE(review): the code divides by 16 while this inherited
		 * comment says multiply — confirm which is intended */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
506
0c195119
AD
507/**
508 * radeon_boot_test_post_card - check and possibly initialize the hw
509 *
510 * @rdev: radeon_device pointer
511 *
512 * Check if the asic is initialized and if not, attempt to initialize
513 * it (all asics).
514 * Returns true if initialized or false if not.
515 */
72542d77
DA
516bool radeon_boot_test_post_card(struct radeon_device *rdev)
517{
518 if (radeon_card_posted(rdev))
519 return true;
520
521 if (rdev->bios) {
522 DRM_INFO("GPU not posted. posting now...\n");
523 if (rdev->is_atom_bios)
524 atom_asic_init(rdev->mode_info.atom_context);
525 else
526 radeon_combios_asic_init(rdev->ddev);
527 return true;
528 } else {
529 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
530 return false;
531 }
532}
533
0c195119
AD
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated (e.g. a previous init) - nothing to do */
	if (rdev->dummy_page.page)
		return 0;
	/* GFP_DMA32: page must be addressable with 32-bit DMA - TODO confirm */
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		/* release the page again so a retry starts from scratch */
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
561
0c195119
AD
562/**
563 * radeon_dummy_page_fini - free dummy page used by the driver
564 *
565 * @rdev: radeon_device pointer
566 *
567 * Frees the dummy page used by the driver (all asics).
568 */
3ce0a23d
JG
569void radeon_dummy_page_fini(struct radeon_device *rdev)
570{
571 if (rdev->dummy_page.page == NULL)
572 return;
573 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
574 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
575 __free_page(rdev->dummy_page.page);
576 rdev->dummy_page.page = NULL;
577}
578
771fe6b9 579
771fe6b9 580/* ATOM accessor methods */
0c195119
AD
581/*
582 * ATOM is an interpreted byte code stored in tables in the vbios. The
583 * driver registers callbacks to access registers and the interpreter
584 * in the driver parses the tables and executes then to program specific
585 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
586 * atombios.h, and atom.c
587 */
588
589/**
590 * cail_pll_read - read PLL register
591 *
592 * @info: atom card_info pointer
593 * @reg: PLL register offset
594 *
595 * Provides a PLL register accessor for the atom interpreter (r4xx+).
596 * Returns the value of the PLL register.
597 */
771fe6b9
JG
598static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
599{
600 struct radeon_device *rdev = info->dev->dev_private;
601 uint32_t r;
602
603 r = rdev->pll_rreg(rdev, reg);
604 return r;
605}
606
0c195119
AD
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* dispatch through the asic-specific PLL write hook */
	rdev->pll_wreg(rdev, reg, val);
}
622
0c195119
AD
623/**
624 * cail_mc_read - read MC (Memory Controller) register
625 *
626 * @info: atom card_info pointer
627 * @reg: MC register offset
628 *
629 * Provides an MC register accessor for the atom interpreter (r4xx+).
630 * Returns the value of the MC register.
631 */
771fe6b9
JG
632static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
633{
634 struct radeon_device *rdev = info->dev->dev_private;
635 uint32_t r;
636
637 r = rdev->mc_rreg(rdev, reg);
638 return r;
639}
640
0c195119
AD
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* dispatch through the asic-specific MC write hook */
	rdev->mc_wreg(rdev, reg, val);
}
656
0c195119
AD
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords)
 * @val: value to write to the MMIO register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* ATOM passes dword offsets; WREG32 takes byte offsets */
	WREG32(reg*4, val);
}
672
0c195119
AD
673/**
674 * cail_reg_read - read MMIO register
675 *
676 * @info: atom card_info pointer
677 * @reg: MMIO register offset
678 *
679 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
680 * Returns the value of the MMIO register.
681 */
771fe6b9
JG
682static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
683{
684 struct radeon_device *rdev = info->dev->dev_private;
685 uint32_t r;
686
687 r = RREG32(reg*4);
688 return r;
689}
690
0c195119
AD
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords)
 * @val: value to write to the IO register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* ATOM passes dword offsets; WREG32_IO takes byte offsets */
	WREG32_IO(reg*4, val);
}
706
0c195119
AD
707/**
708 * cail_ioreg_read - read IO register
709 *
710 * @info: atom card_info pointer
711 * @reg: IO register offset
712 *
713 * Provides an IO register accessor for the atom interpreter (r4xx+).
714 * Returns the value of the IO register.
715 */
351a52a2
AD
716static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
717{
718 struct radeon_device *rdev = info->dev->dev_private;
719 uint32_t r;
720
721 r = RREG32_IO(reg*4);
722 return r;
723}
724
0c195119
AD
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no PCI I/O BAR: fall back to the MMIO accessors */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* parse failed: undo the allocations made so far */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
772
0c195119
AD
773/**
774 * radeon_atombios_fini - free the driver info and callbacks for atombios
775 *
776 * @rdev: radeon_device pointer
777 *
778 * Frees the driver info and register access callbacks for the ATOM
779 * interpreter (r4xx+).
780 * Called at driver shutdown.
781 */
771fe6b9
JG
782void radeon_atombios_fini(struct radeon_device *rdev)
783{
4a04a844
JG
784 if (rdev->mode_info.atom_context) {
785 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 786 }
0e34d094
TG
787 kfree(rdev->mode_info.atom_context);
788 rdev->mode_info.atom_context = NULL;
61c4b24b 789 kfree(rdev->mode_info.atom_card_info);
0e34d094 790 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
791}
792
0c195119
AD
/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	/* only the bios scratch registers need setting up for combios */
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
814
0c195119
AD
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 * Intentionally empty: combios init allocates nothing to free.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
826
0c195119
AD
827/* if we get transitioned to only one device, take VGA back */
828/**
829 * radeon_vga_set_decode - enable/disable vga decode
830 *
831 * @cookie: radeon_device pointer
832 * @state: enable/disable vga decode
833 *
834 * Enable/disable vga decode (all asics).
835 * Returns VGA resource flags.
836 */
28d52043
DA
837static unsigned int radeon_vga_set_decode(void *cookie, bool state)
838{
839 struct radeon_device *rdev = cookie;
28d52043
DA
840 radeon_vga_set_state(rdev, state);
841 if (state)
842 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
843 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
844 else
845 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
846}
c1176d6f 847
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 * Note: 0 is also reported as valid, matching the classic bit trick.
 */
static bool radeon_check_pot_argument(int arg)
{
	/* a power of two has a single bit set, which arg - 1 clears */
	return !(arg & (arg - 1));
}
860
0c195119
AD
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 * Invalid values are reset to safe defaults with a warning.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;	/* 0 disables the limit */
	}

	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;

	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
	}
	/* radeon_gart_size is in MB */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
907
d1f9809e
ML
908/**
909 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
910 * needed for waking up.
911 *
912 * @pdev: pci dev pointer
913 */
914static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
915{
916
917 /* 6600m in a macbook pro */
918 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
919 pdev->subsystem_device == 0x00e2) {
920 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
921 return true;
922 }
923
924 return false;
925}
926
0c195119
AD
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some hardware needs a longer d3 delay to wake reliably */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev);

		/* restore the original delay once resumed */
		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before powering the card down */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
964
0c195119
AD
965/**
966 * radeon_switcheroo_can_switch - see if switcheroo state can change
967 *
968 * @pdev: pci dev pointer
969 *
970 * Callback for the switcheroo driver. Check of the switcheroo
971 * state can be changed.
972 * Returns true if the state can be changed, false if not.
973 */
6a9ee8af
DA
974static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
975{
976 struct drm_device *dev = pci_get_drvdata(pdev);
977 bool can_switch;
978
979 spin_lock(&dev->count_lock);
980 can_switch = (dev->open_count == 0);
981 spin_unlock(&dev->count_lock);
982 return can_switch;
983}
984
26ec685f
TI
/* vga_switcheroo callbacks for hybrid-graphics power switching;
 * no reprobe hook is provided */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 990
0c195119
AD
991/**
992 * radeon_device_init - initialize the driver
993 *
994 * @rdev: radeon_device pointer
 995 * @ddev: drm dev pointer
996 * @pdev: pci dev pointer
997 * @flags: driver flags
998 *
999 * Initializes the driver info and hw (all asics).
1000 * Returns 0 for success or an error on failure.
1001 * Called at driver startup.
1002 */
771fe6b9
JG
1003int radeon_device_init(struct radeon_device *rdev,
1004 struct drm_device *ddev,
1005 struct pci_dev *pdev,
1006 uint32_t flags)
1007{
351a52a2 1008 int r, i;
ad49f501 1009 int dma_bits;
771fe6b9 1010
771fe6b9 1011 rdev->shutdown = false;
9f022ddf 1012 rdev->dev = &pdev->dev;
771fe6b9
JG
1013 rdev->ddev = ddev;
1014 rdev->pdev = pdev;
1015 rdev->flags = flags;
1016 rdev->family = flags & RADEON_FAMILY_MASK;
1017 rdev->is_atom_bios = false;
1018 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1019 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
733289c2 1020 rdev->accel_working = false;
8b25ed34
AD
1021 /* set up ring ids */
1022 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1023 rdev->ring[i].idx = i;
1024 }
1b5331d9 1025
d522d9cc
TR
1026 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1027 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1028 pdev->subsystem_vendor, pdev->subsystem_device);
1b5331d9 1029
771fe6b9
JG
1030 /* mutex initialization are all done here so we
1031 * can recall function without having locking issues */
d6999bc7 1032 mutex_init(&rdev->ring_lock);
40bacf16 1033 mutex_init(&rdev->dc_hw_i2c_mutex);
c20dc369 1034 atomic_set(&rdev->ih.lock, 0);
4c788679 1035 mutex_init(&rdev->gem.mutex);
c913e23a 1036 mutex_init(&rdev->pm.mutex);
6759a0a7 1037 mutex_init(&rdev->gpu_clock_mutex);
db7fce39 1038 init_rwsem(&rdev->pm.mclk_lock);
dee53e7f 1039 init_rwsem(&rdev->exclusive_lock);
73a6d3fc 1040 init_waitqueue_head(&rdev->irq.vblank_queue);
1b9c3dd0
AD
1041 r = radeon_gem_init(rdev);
1042 if (r)
1043 return r;
721604a1 1044 /* initialize vm here */
36ff39c4 1045 mutex_init(&rdev->vm_manager.lock);
23d4f1f2
AD
1046 /* Adjust VM size here.
1047 * Currently set to 4GB ((1 << 20) 4k pages).
1048 * Max GPUVM size for cayman and SI is 40 bits.
1049 */
721604a1
JG
1050 rdev->vm_manager.max_pfn = 1 << 20;
1051 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
771fe6b9 1052
4aac0473
JG
1053 /* Set asic functions */
1054 r = radeon_asic_init(rdev);
36421338 1055 if (r)
4aac0473 1056 return r;
36421338 1057 radeon_check_arguments(rdev);
4aac0473 1058
f95df9ca
AD
1059 /* all of the newer IGP chips have an internal gart
1060 * However some rs4xx report as AGP, so remove that here.
1061 */
1062 if ((rdev->family >= CHIP_RS400) &&
1063 (rdev->flags & RADEON_IS_IGP)) {
1064 rdev->flags &= ~RADEON_IS_AGP;
1065 }
1066
30256a3f 1067 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
b574f251 1068 radeon_agp_disable(rdev);
771fe6b9
JG
1069 }
1070
ad49f501
DA
1071 /* set DMA mask + need_dma32 flags.
1072 * PCIE - can handle 40-bits.
005a83f1 1073 * IGP - can handle 40-bits
ad49f501 1074 * AGP - generally dma32 is safest
005a83f1 1075 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
ad49f501
DA
1076 */
1077 rdev->need_dma32 = false;
1078 if (rdev->flags & RADEON_IS_AGP)
1079 rdev->need_dma32 = true;
005a83f1 1080 if ((rdev->flags & RADEON_IS_PCI) &&
4a2b6662 1081 (rdev->family <= CHIP_RS740))
ad49f501
DA
1082 rdev->need_dma32 = true;
1083
1084 dma_bits = rdev->need_dma32 ? 32 : 40;
1085 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
771fe6b9 1086 if (r) {
62fff811 1087 rdev->need_dma32 = true;
c52494f6 1088 dma_bits = 32;
771fe6b9
JG
1089 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1090 }
c52494f6
KRW
1091 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1092 if (r) {
1093 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1094 printk(KERN_WARNING "radeon: No coherent DMA available.\n");
1095 }
771fe6b9
JG
1096
1097 /* Registers mapping */
1098 /* TODO: block userspace mapping of io register */
2c385151 1099 spin_lock_init(&rdev->mmio_idx_lock);
01d73a69
JC
1100 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1101 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
771fe6b9
JG
1102 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1103 if (rdev->rmmio == NULL) {
1104 return -ENOMEM;
1105 }
1106 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1107 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1108
351a52a2
AD
1109 /* io port mapping */
1110 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1111 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1112 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1113 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1114 break;
1115 }
1116 }
1117 if (rdev->rio_mem == NULL)
1118 DRM_ERROR("Unable to find PCI I/O BAR\n");
1119
28d52043 1120 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
93239ea1
DA
1121 /* this will fail for cards that aren't VGA class devices, just
1122 * ignore it */
1123 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
26ec685f 1124 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
28d52043 1125
3ce0a23d 1126 r = radeon_init(rdev);
b574f251 1127 if (r)
3ce0a23d 1128 return r;
3ce0a23d 1129
04eb2206
CK
1130 r = radeon_ib_ring_tests(rdev);
1131 if (r)
1132 DRM_ERROR("ib ring test failed (%d).\n", r);
1133
b574f251
JG
1134 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1135 /* Acceleration not working on AGP card try again
1136 * with fallback to PCI or PCIE GART
1137 */
a2d07b74 1138 radeon_asic_reset(rdev);
b574f251
JG
1139 radeon_fini(rdev);
1140 radeon_agp_disable(rdev);
1141 r = radeon_init(rdev);
4aac0473
JG
1142 if (r)
1143 return r;
771fe6b9 1144 }
60a7e396 1145 if ((radeon_testing & 1)) {
ecc0b326
MD
1146 radeon_test_moves(rdev);
1147 }
60a7e396
CK
1148 if ((radeon_testing & 2)) {
1149 radeon_test_syncing(rdev);
1150 }
771fe6b9 1151 if (radeon_benchmarking) {
638dd7db 1152 radeon_benchmark(rdev, radeon_benchmarking);
771fe6b9 1153 }
6cf8a3f5 1154 return 0;
771fe6b9
JG
1155}
1156
4d8bf9ae
CK
1157static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1158
0c195119
AD
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics), releasing the resources
 * acquired by radeon_device_init() in roughly reverse order.
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	/* flag teardown so other paths can bail out */
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* undo the vga_switcheroo / vga client registrations from init */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	/* rio_mem may be NULL if no I/O BAR was found at init time */
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}
1183
1184
1185/*
1186 * Suspend & resume.
1187 */
0c195119
AD
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @state: suspend state (PM event)
 *
 * Puts the hw in the suspend state (all asics): turns off displays,
 * unpins user framebuffers, evicts VRAM, drains the rings, then saves
 * PCI state and optionally powers the device down to D3hot.
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	/* PRETHAW: nothing to do, device will be thawed shortly */
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	/* already powered off by vga_switcheroo */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* stop output-polling before touching display state */
	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		/* a ring failed to drain: signal all fences so waiters unblock */
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	/* console_lock serializes against fbcon while we flip fbdev state */
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
1280
0c195119
AD
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 *
 * Bring the hw back to operating state (all asics): re-enable the PCI
 * device, resume the asic, re-run IB tests, restore scratch regs and
 * display state, then re-enable output polling.
 * Returns 0 for success or an error on failure (-1 if the PCI device
 * could not be re-enabled).
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* powered off by vga_switcheroo: nothing to resume */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* console_lock held across hw bring-up and fbdev unsuspend */
	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1344
0c195119
AD
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics): backs up any
 * unprocessed ring contents, resets the asic, and replays the saved
 * commands. If the post-reset IB test fails, retries once more without
 * the replay.
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* per-ring backup of unprocessed commands, taken before reset */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* exclusive_lock keeps all other users out during the reset */
	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		/* NOTE(review): assumes radeon_ring_backup() sets
		 * ring_data[i] (NULL on empty/failed backup) so the kfree
		 * in the failure path below is safe — confirm in ring code */
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* reset worked: replay the backed-up commands */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			/* restore consumes the backup; don't replay it twice
			 * if we come around via the retry path */
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* retry once more, this time without replay */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: unblock fence waiters and drop the backups */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1423
771fe6b9
JG
1424
1425/*
1426 * Debugfs
1427 */
771fe6b9
JG
1428int radeon_debugfs_add_files(struct radeon_device *rdev,
1429 struct drm_info_list *files,
1430 unsigned nfiles)
1431{
1432 unsigned i;
1433
4d8bf9ae
CK
1434 for (i = 0; i < rdev->debugfs_count; i++) {
1435 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1436 /* Already registered */
1437 return 0;
1438 }
1439 }
c245cb9e 1440
4d8bf9ae 1441 i = rdev->debugfs_count + 1;
c245cb9e
MW
1442 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1443 DRM_ERROR("Reached maximum number of debugfs components.\n");
1444 DRM_ERROR("Report so we increase "
1445 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1446 return -EINVAL;
1447 }
4d8bf9ae
CK
1448 rdev->debugfs[rdev->debugfs_count].files = files;
1449 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1450 rdev->debugfs_count = i;
771fe6b9
JG
1451#if defined(CONFIG_DEBUG_FS)
1452 drm_debugfs_create_files(files, nfiles,
1453 rdev->ddev->control->debugfs_root,
1454 rdev->ddev->control);
1455 drm_debugfs_create_files(files, nfiles,
1456 rdev->ddev->primary->debugfs_root,
1457 rdev->ddev->primary);
1458#endif
1459 return 0;
1460}
1461
4d8bf9ae
CK
/* Remove every debugfs table previously registered through
 * radeon_debugfs_add_files() from both DRM minors. No-op without
 * CONFIG_DEBUG_FS. */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned n;

	for (n = 0; n < rdev->debugfs_count; n++) {
		drm_debugfs_remove_files(rdev->debugfs[n].files,
					 rdev->debugfs[n].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[n].files,
					 rdev->debugfs[n].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1477
771fe6b9
JG
#if defined(CONFIG_DEBUG_FS)
/* Per-minor debugfs init hook: nothing to create up front; entries are
 * added on demand via radeon_debugfs_add_files(). */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* Per-minor debugfs cleanup hook: intentionally empty — entries are
 * removed by radeon_debugfs_remove_files() from radeon_device_fini(). */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif