/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

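/*
 * Illustrative sketch (not called anywhere in this file): the typical
 * get/use/free pattern for a scratch register, as used by the ring tests.
 * The 0xCAFEDEAD poison value and the error handling are assumptions for
 * the example only.
 *
 *	uint32_t scratch;
 *	int r;
 *
 *	r = radeon_scratch_get(rdev, &scratch);
 *	if (r) {
 *		DRM_ERROR("failed to get scratch reg (%d)\n", r);
 *		return r;
 *	}
 *	WREG32(scratch, 0xCAFEDEAD);
 *	... submit work that writes scratch, then poll RREG32(scratch) ...
 *	radeon_scratch_free(rdev, scratch);
 */
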
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			&rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* often unreliable on AGP */
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}
	/* always use writeback/events on NI */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

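/*
 * Illustrative sketch: how a writeback value is typically consumed once
 * rdev->wb.enabled is set. The WB page is little-endian GPU-written memory,
 * so readers convert with le32_to_cpu(); RADEON_WB_SCRATCH_OFFSET is an
 * assumed offset name for the example.
 *
 *	if (rdev->wb.enabled)
 *		value = le32_to_cpu(rdev->wb.wb[RADEON_WB_SCRATCH_OFFSET / 4]);
 *	else
 *		value = RREG32(reg);
 */
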
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
		mc->real_vram_size = radeon_vram_limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

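/*
 * Worked example (numbers assumed for illustration): with base = 0xC0000000
 * and mc_vram_size = 2048M, the space left below 4G is
 * 0xFFFFFFFF - base + 1 = 1024M, so mc_vram_size exceeds it; the MC window
 * is clamped to the aperture and vram_end becomes base + aper_size - 1.
 */
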
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

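/*
 * Worked example (values assumed for illustration): gtt_base_align acts as
 * an alignment mask (alignment - 1). With vram_end = 0x0FFFFFFF and
 * gtt_base_align = 0xFFFFF (1M alignment):
 *
 *	size_af = ((0xFFFFFFFF - 0x0FFFFFFF) + 0xFFFFF) & ~0xFFFFF
 *	        = 0xF00FFFFF & ~0xFFFFF = 0xF0000000
 *
 * i.e. the space above VRAM, rounded so gtt_start lands on a 1M boundary at
 * (vram_end + 1 + align) & ~align = 0x10000000.
 */
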
/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
		return false;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

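/*
 * Worked example (an illustrative sketch, assuming the clocks are stored in
 * 10 kHz units and fixed20_12 is 20.12 fixed point): dfixed_const(100) is
 * 100 << 12, so for current_sclk = 68000 the division above yields 680 MHz
 * in 20.12 form.
 */
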
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
			0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}

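/*
 * Illustrative note: the dummy page mapped above is what unbound GART
 * entries are typically pointed at, e.g. (a sketch; the helper lives in
 * the GART code, not this file):
 *
 *	radeon_gart_set_page(rdev, i, rdev->dummy_page.addr);
 *
 * so stray GPU accesses hit a harmless DMA target instead of random memory.
 */
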
/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take the VGA resources back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

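/*
 * Illustrative usage (parameter names assumed from the matching
 * module_param_named() entries in radeon_drv.c): the values validated
 * above come from module parameters, e.g.
 *
 *	modprobe radeon gartsize=512 vramlimit=256 agpmode=-1
 *
 * Out-of-range values are reset to safe defaults rather than failing init.
 */
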
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

int radeon_device_init(struct radeon_device *rdev,
		struct drm_device *ddev,
		struct pci_dev *pdev,
		uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization is all done here so we
	 * can recall functions without having locking issues */
	radeon_mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_lock);
	rwlock_init(&rdev->semaphore_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);
	INIT_LIST_HEAD(&rdev->semaphore_drv.free);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family < CHIP_RS400))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have more than one VGA card, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration is not working on this AGP card; try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev);

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}

/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		radeon_fence_wait_last(rdev, i);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs */
	if (rdev->is_atom_bios)
		radeon_atom_encoder_init(rdev);
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;
	int resched;

	/* Prevent CS ioctl from interfering */
	radeon_mutex_lock(&rdev->cs_mutex);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	}

	radeon_mutex_unlock(&rdev->cs_mutex);

	if (r) {
		/* bad news, how do we tell userspace? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	return r;
}

/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

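/*
 * Illustrative sketch (names assumed): how a component typically registers
 * its entries through the helper above. radeon_debugfs_example_info is a
 * hypothetical drm_info_list callback, not part of this file.
 *
 *	static struct drm_info_list radeon_debugfs_example_list[] = {
 *		{"radeon_example_info", radeon_debugfs_example_info, 0, NULL},
 *	};
 *
 *	r = radeon_debugfs_add_files(rdev, radeon_debugfs_example_list,
 *				     ARRAY_SIZE(radeon_debugfs_example_list));
 */
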
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif