/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
        "R100",
        "RV100",
        "RS100",
        "RV200",
        "RS200",
        "R200",
        "RV250",
        "RS300",
        "RV280",
        "R300",
        "R350",
        "RV350",
        "RV380",
        "R420",
        "R423",
        "RV410",
        "RS400",
        "RS480",
        "RS600",
        "RS690",
        "RS740",
        "RV515",
        "R520",
        "RV530",
        "RV560",
        "RV570",
        "R580",
        "R600",
        "RV610",
        "RV630",
        "RV670",
        "RV620",
        "RV635",
        "RS780",
        "RS880",
        "RV770",
        "RV730",
        "RV710",
        "RV740",
        "CEDAR",
        "REDWOOD",
        "JUNIPER",
        "CYPRESS",
        "HEMLOCK",
        "PALM",
        "BARTS",
        "TURKS",
        "CAICOS",
        "CAYMAN",
        "LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
        /* FIXME: check this out */
        if (rdev->family < CHIP_R600) {
                int i;

                for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
                        if (rdev->surface_regs[i].bo)
                                radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
                        else
                                radeon_clear_surface_reg(rdev, i);
                }
                /* enable surfaces */
                WREG32(RADEON_SURFACE_CNTL, 0);
        }
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
        int i;

        /* FIXME: check this out */
        if (rdev->family < CHIP_R300) {
                rdev->scratch.num_reg = 5;
        } else {
                rdev->scratch.num_reg = 7;
        }
        rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
        for (i = 0; i < rdev->scratch.num_reg; i++) {
                rdev->scratch.free[i] = true;
                rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
        }
}

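/*
 * A typical caller (sketch only, not taken from this file) pairs the two
 * helpers below around a test that makes the GPU write the register:
 *
 *      uint32_t scratch;
 *
 *      if (radeon_scratch_get(rdev, &scratch) == 0) {
 *              WREG32(scratch, 0xCAFEDEAD);
 *              ... emit a packet that writes scratch, then poll it ...
 *              radeon_scratch_free(rdev, scratch);
 *      }
 */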
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.free[i]) {
                        rdev->scratch.free[i] = false;
                        *reg = rdev->scratch.reg[i];
                        return 0;
                }
        }
        return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.reg[i] == reg) {
                        rdev->scratch.free[i] = true;
                        return;
                }
        }
}

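/*
 * Writeback: instead of the CPU polling GPU registers (scratch values,
 * fence sequence numbers, the ring read pointer), the GPU can DMA those
 * values into a page of system memory (rdev->wb.wb) that the CPU reads
 * directly.  The helpers below manage that page.
 */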
void radeon_wb_disable(struct radeon_device *rdev)
{
        int r;

        if (rdev->wb.wb_obj) {
                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
                if (unlikely(r != 0))
                        return;
                radeon_bo_kunmap(rdev->wb.wb_obj);
                radeon_bo_unpin(rdev->wb.wb_obj);
                radeon_bo_unreserve(rdev->wb.wb_obj);
        }
        rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
        radeon_wb_disable(rdev);
        if (rdev->wb.wb_obj) {
                radeon_bo_unref(&rdev->wb.wb_obj);
                rdev->wb.wb = NULL;
                rdev->wb.wb_obj = NULL;
        }
}

int radeon_wb_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->wb.wb_obj == NULL) {
                r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
                if (r) {
                        dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }
        }
        r = radeon_bo_reserve(rdev->wb.wb_obj, false);
        if (unlikely(r != 0)) {
                radeon_wb_fini(rdev);
                return r;
        }
        r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
                          &rdev->wb.gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->wb.wb_obj);
                dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
                radeon_wb_fini(rdev);
                return r;
        }
        r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
        radeon_bo_unreserve(rdev->wb.wb_obj);
        if (r) {
                dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
                radeon_wb_fini(rdev);
                return r;
        }

        /* disable event_write fences */
        rdev->wb.use_event = false;
        /* disabled via module param */
        if (radeon_no_wb == 1)
                rdev->wb.enabled = false;
        else {
                /* often unreliable on AGP */
                if (rdev->flags & RADEON_IS_AGP) {
                        rdev->wb.enabled = false;
                } else {
                        rdev->wb.enabled = true;
                        /* event_write fences are only available on r600+ */
                        if (rdev->family >= CHIP_R600)
                                rdev->wb.use_event = true;
                }
        }
        /* always use writeback/events on NI */
        if (ASIC_IS_DCE5(rdev)) {
                rdev->wb.enabled = true;
                rdev->wb.use_event = true;
        }

        dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

        return 0;
}

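/*
 * Note: radeon_no_wb above is the "no_wb" module parameter (declared in
 * radeon_drv.c), so writeback can be forced off from the kernel command
 * line with radeon.no_wb=1, e.g. when chasing fence/writeback problems.
 */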
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by the bogus hw of Novell bug 204882 and the Ubuntu
 * ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
        mc->vram_start = base;
        if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                 mc->mc_vram_size >> 20, mc->vram_start,
                 mc->vram_end, mc->real_vram_size >> 20);
}

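/*
 * Worked example for the 32-bit clamp above (illustrative numbers): with
 * base = 0xC0000000, only 0xFFFFFFFF - 0xC0000000 + 1 = 1 GiB of address
 * space remains, so a card with mc_vram_size = 2 GiB gets mc_vram_size
 * and real_vram_size clamped to aper_size.
 */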
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left, then we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
        u64 size_af, size_bf;

        size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
        size_bf = mc->vram_start & ~mc->gtt_base_align;
        if (size_bf > size_af) {
                if (mc->gtt_size > size_bf) {
                        dev_warn(rdev->dev, "limiting GTT\n");
                        mc->gtt_size = size_bf;
                }
                mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
        } else {
                if (mc->gtt_size > size_af) {
                        dev_warn(rdev->dev, "limiting GTT\n");
                        mc->gtt_size = size_af;
                }
                mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
        }
        mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
        dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
                 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

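/*
 * size_bf/size_af above are the aligned bytes available before and after
 * VRAM.  E.g. with vram_start = 0x10000000, vram_end = 0x2FFFFFFF and
 * gtt_base_align = 0 (illustrative values): size_bf = 256 MiB and
 * size_af = 0xFFFFFFFF - 0x2FFFFFFF = 0xD0000000 (~3.25 GiB), so the GTT
 * is placed after VRAM at 0x30000000.
 */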
/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
        uint32_t reg;

        /* first check CRTCs */
        if (ASIC_IS_DCE41(rdev)) {
                reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
                if (reg & EVERGREEN_CRTC_MASTER_EN)
                        return true;
        } else if (ASIC_IS_DCE4(rdev)) {
                reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
                if (reg & EVERGREEN_CRTC_MASTER_EN)
                        return true;
        } else if (ASIC_IS_AVIVO(rdev)) {
                reg = RREG32(AVIVO_D1CRTC_CONTROL) |
                        RREG32(AVIVO_D2CRTC_CONTROL);
                if (reg & AVIVO_CRTC_EN) {
                        return true;
                }
        } else {
                reg = RREG32(RADEON_CRTC_GEN_CNTL) |
                        RREG32(RADEON_CRTC2_GEN_CNTL);
                if (reg & RADEON_CRTC_EN) {
                        return true;
                }
        }

        /* then check MEM_SIZE, in case the crtcs are off */
        if (rdev->family >= CHIP_R600)
                reg = RREG32(R600_CONFIG_MEMSIZE);
        else
                reg = RREG32(RADEON_CONFIG_MEMSIZE);

        if (reg)
                return true;

        return false;
}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
        fixed20_12 a;
        u32 sclk = rdev->pm.current_sclk;
        u32 mclk = rdev->pm.current_mclk;

        /* sclk/mclk in MHz */
        a.full = dfixed_const(100);
        rdev->pm.sclk.full = dfixed_const(sclk);
        rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
        rdev->pm.mclk.full = dfixed_const(mclk);
        rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

        if (rdev->flags & RADEON_IS_IGP) {
                a.full = dfixed_const(16);
                /* core_bandwidth = sclk(MHz) * 16 */
                rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
        }
}

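/*
 * The dfixed_div() by 100 above matches clocks being kept in 10 kHz
 * units elsewhere in the driver: e.g. a current_sclk of 68000 would come
 * out as 680 MHz in 20.12 fixed point (illustrative figure).
 */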
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
        if (radeon_card_posted(rdev))
                return true;

        if (rdev->bios) {
                DRM_INFO("GPU not posted. posting now...\n");
                if (rdev->is_atom_bios)
                        atom_asic_init(rdev->mode_info.atom_context);
                else
                        radeon_combios_asic_init(rdev->ddev);
                return true;
        } else {
                dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                return false;
        }
}

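/*
 * The dummy page backs otherwise-unbound GART entries, so stray GPU
 * accesses land on a harmless, known page instead of arbitrary memory.
 */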
int radeon_dummy_page_init(struct radeon_device *rdev)
{
        if (rdev->dummy_page.page)
                return 0;
        rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
        if (rdev->dummy_page.page == NULL)
                return -ENOMEM;
        rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
                                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
                dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
                __free_page(rdev->dummy_page.page);
                rdev->dummy_page.page = NULL;
                return -ENOMEM;
        }
        return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
        if (rdev->dummy_page.page == NULL)
                return;
        pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        __free_page(rdev->dummy_page.page);
        rdev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->pll_rreg(rdev, reg);
        return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->mc_rreg(rdev, reg);
        return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = RREG32(reg*4);
        return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = RREG32_IO(reg*4);
        return r;
}

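/*
 * radeon_atombios_init() wires the cail_* accessors above into a
 * card_info table, so the ATOM BIOS interpreter performs register,
 * PLL and MC accesses through the driver rather than directly.
 */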
int radeon_atombios_init(struct radeon_device *rdev)
{
        struct card_info *atom_card_info =
            kzalloc(sizeof(struct card_info), GFP_KERNEL);

        if (!atom_card_info)
                return -ENOMEM;

        rdev->mode_info.atom_card_info = atom_card_info;
        atom_card_info->dev = rdev->ddev;
        atom_card_info->reg_read = cail_reg_read;
        atom_card_info->reg_write = cail_reg_write;
        /* needed for iio ops */
        if (rdev->rio_mem) {
                atom_card_info->ioreg_read = cail_ioreg_read;
                atom_card_info->ioreg_write = cail_ioreg_write;
        } else {
                DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
                atom_card_info->ioreg_read = cail_reg_read;
                atom_card_info->ioreg_write = cail_reg_write;
        }
        atom_card_info->mc_read = cail_mc_read;
        atom_card_info->mc_write = cail_mc_write;
        atom_card_info->pll_read = cail_pll_read;
        atom_card_info->pll_write = cail_pll_write;

        rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
        mutex_init(&rdev->mode_info.atom_context->mutex);
        radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
        atom_allocate_fb_scratch(rdev->mode_info.atom_context);
        return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
        if (rdev->mode_info.atom_context) {
                kfree(rdev->mode_info.atom_context->scratch);
                kfree(rdev->mode_info.atom_context);
        }
        kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
        radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
        return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
        struct radeon_device *rdev = cookie;
        radeon_vga_set_state(rdev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
        /* vramlimit must be a power of two */
        switch (radeon_vram_limit) {
        case 0:
        case 4:
        case 8:
        case 16:
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
        case 4096:
                break;
        default:
                dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
                         radeon_vram_limit);
                radeon_vram_limit = 0;
                break;
        }
        radeon_vram_limit = radeon_vram_limit << 20;
        /* gtt size must be power of two and greater or equal to 32M */
        switch (radeon_gart_size) {
        case 4:
        case 8:
        case 16:
                dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
                         radeon_gart_size);
                radeon_gart_size = 512;
                break;
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
        case 4096:
                break;
        default:
                dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                         radeon_gart_size);
                radeon_gart_size = 512;
                break;
        }
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        /* AGP mode can only be -1, 0, 1, 2, 4, 8 */
        switch (radeon_agpmode) {
        case -1:
        case 0:
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
                         "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
                radeon_agpmode = 0;
                break;
        }
}

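/*
 * radeon_vram_limit, radeon_gart_size and radeon_agpmode above come from
 * the vramlimit, gartsize and agpmode module parameters (declared in
 * radeon_drv.c), so e.g. booting with radeon.gartsize=1024 is validated
 * here before the MC is programmed.
 */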
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
        if (state == VGA_SWITCHEROO_ON) {
                printk(KERN_INFO "radeon: switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                radeon_resume_kms(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                drm_kms_helper_poll_enable(dev);
        } else {
                printk(KERN_INFO "radeon: switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                radeon_suspend_kms(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        bool can_switch;

        spin_lock(&dev->count_lock);
        can_switch = (dev->open_count == 0);
        spin_unlock(&dev->count_lock);
        return can_switch;
}

int radeon_device_init(struct radeon_device *rdev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags)
{
        int r, i;
        int dma_bits;

        rdev->shutdown = false;
        rdev->dev = &pdev->dev;
        rdev->ddev = ddev;
        rdev->pdev = pdev;
        rdev->flags = flags;
        rdev->family = flags & RADEON_FAMILY_MASK;
        rdev->is_atom_bios = false;
        rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        rdev->gpu_lockup = false;
        rdev->accel_working = false;

        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
                 radeon_family_name[rdev->family], pdev->vendor, pdev->device);

        /* mutex initialization is all done here so we
         * can call these functions again without locking issues */
        mutex_init(&rdev->cs_mutex);
        mutex_init(&rdev->ib_pool.mutex);
        mutex_init(&rdev->cp.mutex);
        mutex_init(&rdev->dc_hw_i2c_mutex);
        if (rdev->family >= CHIP_R600)
                spin_lock_init(&rdev->ih.lock);
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
        mutex_init(&rdev->vram_mutex);
        rwlock_init(&rdev->fence_drv.lock);
        INIT_LIST_HEAD(&rdev->gem.objects);
        init_waitqueue_head(&rdev->irq.vblank_queue);
        init_waitqueue_head(&rdev->irq.idle_queue);

        /* Set asic functions */
        r = radeon_asic_init(rdev);
        if (r)
                return r;
        radeon_check_arguments(rdev);

        /* all of the newer IGP chips have an internal gart;
         * however some rs4xx report as AGP, so remove that here.
         */
        if ((rdev->family >= CHIP_RS400) &&
            (rdev->flags & RADEON_IS_IGP)) {
                rdev->flags &= ~RADEON_IS_AGP;
        }

        if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
                radeon_agp_disable(rdev);
        }

        /* set DMA mask + need_dma32 flags.
         * PCIE - can handle 40-bits.
         * IGP - can handle 40-bits (in theory)
         * AGP - generally dma32 is safest
         * PCI - only dma32
         */
        rdev->need_dma32 = false;
        if (rdev->flags & RADEON_IS_AGP)
                rdev->need_dma32 = true;
        if (rdev->flags & RADEON_IS_PCI)
                rdev->need_dma32 = true;

        dma_bits = rdev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                printk(KERN_WARNING "radeon: No suitable DMA available.\n");
        }

        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
        rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
        rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
        if (rdev->rmmio == NULL) {
                return -ENOMEM;
        }
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

        /* io port mapping */
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
                        rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
                        rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
                        break;
                }
        }
        if (rdev->rio_mem == NULL)
                DRM_ERROR("Unable to find PCI I/O BAR\n");

        /* if we have > 1 VGA cards, then disable the radeon VGA resources */
        /* this will fail for cards that aren't VGA class devices, just
         * ignore it */
        vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
        vga_switcheroo_register_client(rdev->pdev,
                                       radeon_switcheroo_set_state,
                                       NULL,
                                       radeon_switcheroo_can_switch);

        r = radeon_init(rdev);
        if (r)
                return r;

        if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
                /* Acceleration not working on AGP card, try again
                 * with fallback to PCI or PCIE GART
                 */
                radeon_asic_reset(rdev);
                radeon_fini(rdev);
                radeon_agp_disable(rdev);
                r = radeon_init(rdev);
                if (r)
                        return r;
        }
        if (radeon_testing) {
                radeon_test_moves(rdev);
        }
        if (radeon_benchmarking) {
                radeon_benchmark(rdev);
        }
        return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
        DRM_INFO("radeon: finishing device.\n");
        rdev->shutdown = true;
        /* evict vram memory */
        radeon_bo_evict_vram(rdev);
        radeon_fini(rdev);
        vga_switcheroo_unregister_client(rdev->pdev);
        vga_client_register(rdev->pdev, NULL, NULL, NULL);
        if (rdev->rio_mem)
                pci_iounmap(rdev->pdev, rdev->rio_mem);
        rdev->rio_mem = NULL;
        iounmap(rdev->rmmio);
        rdev->rmmio = NULL;
}

/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
        struct radeon_device *rdev;
        struct drm_crtc *crtc;
        struct drm_connector *connector;
        int r;

        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
        }
        if (state.event == PM_EVENT_PRETHAW) {
                return 0;
        }
        rdev = dev->dev_private;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        /* turn off display hw */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
        }

        /* unpin the front buffers */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
                struct radeon_bo *robj;

                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
                robj = gem_to_radeon_bo(rfb->obj);
                /* don't unpin kernel fb objects */
                if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
                        r = radeon_bo_reserve(robj, false);
                        if (r == 0) {
                                radeon_bo_unpin(robj);
                                radeon_bo_unreserve(robj);
                        }
                }
        }
        /* evict vram memory */
        radeon_bo_evict_vram(rdev);
        /* wait for gpu to finish processing current batch */
        radeon_fence_wait_last(rdev);

        radeon_save_bios_scratch_regs(rdev);

        radeon_pm_suspend(rdev);
        radeon_suspend(rdev);
        radeon_hpd_fini(rdev);
        /* evict remaining vram memory */
        radeon_bo_evict_vram(rdev);

        radeon_agp_suspend(rdev);

        pci_save_state(dev->pdev);
        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
        console_lock();
        radeon_fbdev_set_suspend(rdev, 1);
        console_unlock();
        return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
        struct drm_connector *connector;
        struct radeon_device *rdev = dev->dev_private;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        console_lock();
        pci_set_power_state(dev->pdev, PCI_D0);
        pci_restore_state(dev->pdev);
        if (pci_enable_device(dev->pdev)) {
                console_unlock();
                return -1;
        }
        pci_set_master(dev->pdev);
        /* resume AGP if in use */
        radeon_agp_resume(rdev);
        radeon_resume(rdev);
        radeon_pm_resume(rdev);
        radeon_restore_bios_scratch_regs(rdev);

        radeon_fbdev_set_suspend(rdev, 0);
        console_unlock();

        /* reset hpd state */
        radeon_hpd_init(rdev);
        /* blat the mode back in */
        drm_helper_resume_force_mode(dev);
        /* turn on display hw */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
        }
        return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
        int r;
        int resched;

        radeon_save_bios_scratch_regs(rdev);
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
        radeon_suspend(rdev);

        r = radeon_asic_reset(rdev);
        if (!r) {
                dev_info(rdev->dev, "GPU reset succeeded\n");
                radeon_resume(rdev);
                radeon_restore_bios_scratch_regs(rdev);
                drm_helper_resume_force_mode(rdev->ddev);
                ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
                return 0;
        }
        /* bad news, how do we tell userspace? */
        dev_info(rdev->dev, "GPU reset failed\n");
        return r;
}

/*
 * Debugfs
 */
struct radeon_debugfs {
        struct drm_info_list *files;
        unsigned num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles)
{
        unsigned i;

        for (i = 0; i < _radeon_debugfs_count; i++) {
                if (_radeon_debugfs[i].files == files) {
                        /* Already registered */
                        return 0;
                }
        }
        if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
                DRM_ERROR("Reached maximum number of debugfs files.\n");
                DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
                return -EINVAL;
        }
        _radeon_debugfs[_radeon_debugfs_count].files = files;
        _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
        _radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->control->debugfs_root,
                                 rdev->ddev->control);
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->primary->debugfs_root,
                                 rdev->ddev->primary);
#endif
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
        return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
        unsigned i;

        for (i = 0; i < _radeon_debugfs_count; i++) {
                drm_debugfs_remove_files(_radeon_debugfs[i].files,
                                         _radeon_debugfs[i].num_files, minor);
        }
}
#endif
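
/*
 * Callers elsewhere in the driver register file lists like this (sketch
 * with hypothetical names, not taken from this file):
 *
 *      static struct drm_info_list radeon_foo_list[] = {
 *              { "radeon_foo", radeon_debugfs_foo_info, 0, NULL },
 *      };
 *
 *      radeon_debugfs_add_files(rdev, radeon_foo_list,
 *                               ARRAY_SIZE(radeon_foo_list));
 */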