/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

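/*
 * Allocate a free scratch register and return its MMIO offset in *reg;
 * returns -EINVAL when all scratch registers are in use.  The matching
 * radeon_scratch_free() below returns a register to the pool.
 */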
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

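/*
 * Writeback (WB) helpers: the GPU can write status values (e.g. fence
 * sequence numbers) into a small buffer object in GTT memory instead of
 * the driver having to read registers.  radeon_wb_disable() unpins the
 * buffer, radeon_wb_fini() frees it, and radeon_wb_init() allocates,
 * pins and maps it, then decides whether writeback can actually be used
 * (it is disabled on AGP and via the radeon_no_wb module parameter).
 */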
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* often unreliable on AGP */
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * This function tries to place VRAM at the base address provided as a
 * parameter (which is so far either the PCI aperture address or, for
 * IGPs, the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; the worst case is limiting VRAM.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size because on some boards we need to program the mc
 * to cover the whole aperture even if the VRAM size is smaller than the
 * aperture size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the many
 * Ubuntu ones).
 *
 * Note: the IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place the GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
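/*
 * Check whether the GPU has already been initialized (posted) by the
 * BIOS: first look for an enabled CRTC on the relevant display block
 * (DCE4.1, DCE4, AVIVO or legacy), then fall back to checking whether
 * the memory controller reports a non-zero memory size.
 */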
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

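/*
 * Refresh the cached fixed-point engine (sclk) and memory (mclk) clock
 * values from the current power management state; on IGPs also recompute
 * the core bandwidth figure derived from the engine clock.
 */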
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(MHz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

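/*
 * Make sure the card has been posted: if the BIOS/bootloader did not
 * initialize it, run the ASIC init tables from the video BIOS (ATOM or
 * COMBIOS).  Returns false when the card is not posted and no BIOS is
 * available to post it.
 */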
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

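/*
 * The dummy page is a single zeroed system page used as a safe backing
 * page (e.g. for unbound GART entries).  Allocate it from the DMA32 zone
 * and map it for bidirectional DMA; the fini counterpart below undoes both.
 */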
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

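/*
 * Set up the ATOM BIOS interpreter: fill a card_info structure with the
 * register access callbacks above (falling back to MMIO when no PCI I/O
 * BAR is available), parse the BIOS image, and initialize the scratch
 * registers and scratch memory the interpreter needs.
 */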
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

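/*
 * Validate the module parameters: force the VRAM limit and GART size to
 * sane power-of-two values (GART at least 32M) and reject invalid AGP
 * modes, warning and falling back to defaults when a value is bad.
 */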
void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vram limit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

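/*
 * vga_switcheroo callbacks: power the card up or down when the mux
 * switches between GPUs, and only allow a switch while no client has
 * the DRM device open.
 */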
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}


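/*
 * Main device initialization: record the device/family information, set up
 * locks, the work queue and the ASIC function pointers, configure the DMA
 * mask, map the MMIO and I/O BARs, register with the VGA arbiter and
 * vga_switcheroo, and finally run the ASIC-specific init (retrying without
 * AGP if acceleration fails to come up on an AGP card).
 */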
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device);

	/* mutex initialization is all done here so we
	 * can call these functions again without locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);

	/* setup workqueue */
	rdev->wq = create_workqueue("radeon");
	if (rdev->wq == NULL)
		return -ENOMEM;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However, some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration is not working on this AGP card; try again
		 * with a fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	destroy_workqueue(rdev->wq);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	acquire_console_sem();
	radeon_fbdev_set_suspend(rdev, 1);
	release_console_sem();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	acquire_console_sem();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		release_console_sem();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	release_console_sem();

	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}
	return 0;
}

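/*
 * Attempt to recover from a GPU lockup: save the BIOS scratch registers,
 * suspend the ASIC, perform an ASIC reset, and if it succeeds resume the
 * ASIC and force the mode back.  Returns the reset error code on failure.
 */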
int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;

	radeon_save_bios_scratch_regs(rdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		return 0;
	}
	/* bad news, how do we tell userspace? */
	dev_info(rdev->dev, "GPU reset failed\n");
	return r;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif