/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with and/or masks.
 * This is a helper for setting golden registers.
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

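/*
 * Usage sketch for the helper above (illustrative only: the offsets,
 * masks, and values below are made up, not real golden settings).
 * Entries come in (reg, and_mask, or_mask) triplets:
 *
 *	static const u32 example_golden_registers[] =
 *	{
 *		0x9114, 0xffffffff, 0x00010000,	// and_mask 0xffffffff: write or_mask directly
 *		0x9830, 0x0000000f, 0x00000001,	// otherwise: read, clear and_mask bits, set or_mask
 *	};
 *
 *	radeon_program_register_sequence(rdev, example_golden_registers,
 *					 (const u32)ARRAY_SIZE(example_golden_registers));
 */
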
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch registers helper functions.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx)
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

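/*
 * A minimal usage sketch, modeled on the ring tests elsewhere in the
 * driver (error handling trimmed for brevity):
 *
 *	uint32_t scratch;
 *
 *	r = radeon_scratch_get(rdev, &scratch);
 *	if (r == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);	// CPU seeds the register...
 *		...				// ...a GPU packet writes it back
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */
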
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Allocate a doorbell for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics)
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}

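/*
 * Usage sketch (hypothetical caller; ring setup code on CIK allocates an
 * index like this and later programs it into the hardware queue):
 *
 *	u32 index;
 *
 *	if (radeon_doorbell_get(rdev, &index) == 0) {
 *		ring->doorbell_index = index;
 *		...
 *		radeon_doorbell_free(rdev, index);
 *	}
 */
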
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the Writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

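/*
 * Sketch of how the writeback page is typically consumed: ring code reads
 * a GPU-updated pointer from the mapped page instead of doing a slow MMIO
 * register read ("index" and "reg" are placeholders here):
 *
 *	if (rdev->wb.enabled)
 *		rptr = rdev->wb.wb[index / 4];	// GPU wrote it to system memory
 *	else
 *		rptr = RREG32(reg);		// fall back to an MMIO read
 */
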
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided as a
 * parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 * address space, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM, then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left, then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

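/*
 * Worked example with hypothetical numbers: 1024M of VRAM placed at
 * base 0 gives vram_start = 0x0 and vram_end = 0x3FFFFFFF.  With
 * gtt_base_align = 0, size_bf (the space before VRAM) is 0, so the GTT
 * is placed after VRAM: a 512M GTT gets gtt_start = 0x40000000 and
 * gtt_end = 0x5FFFFFFF.
 */
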
/*
 * GPU helper functions.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}

/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

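/* e.g. 256 is accepted (0x100 & 0x0ff == 0) while 192 is rejected
 * (0xc0 & 0xbf == 0x80).
 */
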
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	if (radeon_gart_size == -1) {
		/* default to a larger gart size on newer asics */
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}

/**
 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
 * needed for waking up.
 *
 * @pdev: pci dev pointer
 */
static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
{
	/* 6600m in a macbook pro */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
	    pdev->subsystem_device == 0x00e2) {
		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
		return true;
	}

	return false;
}

/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};

/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initializations are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}

	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev);

/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}


/*
 * Suspend & resume.
 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: suspend fbdev console or not
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: resume fbdev console or not
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}

/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	rdev->needs_reset = false;

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	radeon_pm_resume(rdev);
	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}


/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

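/*
 * Registration sketch (the list name and show callback are hypothetical;
 * real callers pass their own drm_info_list arrays):
 *
 *	static struct drm_info_list example_info_list[] = {
 *		{"radeon_example", example_debugfs_show, 0, NULL},
 *	};
 *
 *	radeon_debugfs_add_files(rdev, example_info_list,
 *				 ARRAY_SIZE(example_info_list));
 */
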
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
1762#endif