/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_microcode.h"
#include "radeon_reg.h"
#include "radeon.h"

/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
void r100_hdp_reset(struct radeon_device *rdev);
void r100_gpu_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_mc_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);


/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here ? */
	/* The hardware seems to cache only one entry, so that entry should
	 * be discarded; otherwise the first GPU GART read that hits it
	 * could end up using a stale address. */
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->gart.table.ram.ptr == NULL) {
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
		r = radeon_gart_table_ram_alloc(rdev);
		if (r) {
			return r;
		}
	}
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32(RADEON_AIC_HI_ADDR, tmp);
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	if (i < 0 || i > rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
	return 0;
}
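
/* A worked example of the table sizing above: each GART entry is one
 * little-endian 32-bit word holding the bus address of a system page
 * (see r100_pci_gart_set_page), so the table is num_gpu_pages * 4 bytes.
 * With a 32 MB GTT and 4 KB pages that is 8192 pages and a 32 KB table,
 * whose base address is what gets programmed into RADEON_AIC_PT_BASE.
 */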

int r100_gart_enable(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_AGP) {
		r100_pci_gart_disable(rdev);
		return 0;
	}
	return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
void r100_mc_disable_clients(struct radeon_device *rdev)
{
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;

	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* stop display and memory access */
	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);

	r100_gpu_wait_for_vsync(rdev);

	WREG32(RADEON_CRTC_GEN_CNTL,
	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

		r100_gpu_wait_for_vsync2(rdev);
		WREG32(RADEON_CRTC2_GEN_CNTL,
		       (crtc2_gen_cntl &
			~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
		       RADEON_CRTC2_DISP_REQ_EN_B);
	}

	udelay(500);
}

void r100_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(RADEON_MC_FB_LOCATION, tmp);

	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);

	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(RADEON_AGP_BASE, 0);
	}

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}
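
/* A worked example of the MC_FB_LOCATION packing above: start and top are
 * expressed in 64 KB units (hence the >> 16).  With vram_location = 0 and
 * vram_size = 128 MB, top = 0x07FFFFFF >> 16 = 0x07FF and start = 0x0000,
 * so the GPU sees the framebuffer aperture as [0x0000..0x07FF] * 64 KB.
 * The AGP aperture is packed the same way into MC_AGP_LOCATION.
 */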

int r100_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r100_gpu_init(rdev);
	/* Disable gart which also disables out of gart access */
	r100_pci_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	r100_mc_disable_clients(rdev);
	if (r100_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	r100_mc_setup(rdev);
	return 0;
}

void r100_mc_fini(struct radeon_device *rdev)
{
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}


/*
 * Fence emission
 */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are the ib scheduler and
	 * buffer move) */
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
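
/* Note on the magic values above: register 0x1720 is RADEON_WAIT_UNTIL and,
 * as far as the register definitions go, bits 16 and 17 request that the 2D
 * and 3D engines be idle and their caches clean before the CP continues, so
 * the fence sequence number only lands in the scratch register (and the
 * software interrupt only fires) once preceding rendering has drained.
 */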


/*
 * Writeback
 */
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false, &rdev->wb.wb_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->wb.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	WREG32(0x774, rdev->wb.gpu_addr);
	WREG32(0x70C, rdev->wb.gpu_addr + 1024);
	WREG32(0x770, 0xff);
	return 0;
}
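
/* The raw offsets above are not named in this file; they appear to match
 * RADEON_SCRATCH_ADDR (0x774), RADEON_CP_RB_RPTR_ADDR (0x70C) and
 * RADEON_SCRATCH_UMSK (0x770): scratch register contents are written back
 * starting at gpu_addr, the ring read pointer is mirrored at
 * gpu_addr + 1024, and 0xff unmasks writeback for all scratch registers.
 */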

void r100_wb_fini(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_pages, 8191);
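
	/* A worked example of the setup above, assuming 4 KB pages: the blit
	 * treats each page as one row of an ARGB8888 surface, so
	 * stride_bytes = 4096, pitch = 4096 / 64 = 64 (pitch is taken in
	 * 64-byte units) and stride_pixels = 4096 / 4 = 1024.  Each
	 * BITBLT_MULTI packet can move at most 8191 rows, hence one loop per
	 * 8191 pages; e.g. a 64 MB move (16384 pages) needs num_loops = 2
	 * and asks for ndw = 64 + 10 * 2 = 84 ring dwords below.
	 */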

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages are in Y direction - height
		 * page width in X direction - width */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}


/*
 * CP
 */
void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	int i;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
		}
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
		}
	}
}

int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	/* Reset CP */
	tmp = RREG32(RADEON_CP_CSQ_STAT);
	if ((tmp & (1 << 31))) {
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		tmp = RREG32(RADEON_CP_CSQ_STAT);
		if ((tmp & (1 << 31))) {
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
		}
	} else {
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
	}
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
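	/* A worked example of the alignment above: asking for a 1 MB ring
	 * gives rb_bufsz = drm_order(1048576 / 8) = 17 and
	 * ring_size = (1 << 18) * 4 = 1 MB again, i.e. the request is
	 * rounded to the power-of-two dword count that the RB_BUFSZ field
	 * encodes further down.
	 */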
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the CP reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Writes to CP_RB_WPTR are delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force a CP_RB_WPTR write if it is written more than once before
	 * the delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 * the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
	 * the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
	 * The idea is that most of the gpu commands go through the indirect1
	 * buffer, so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	WREG32(RADEON_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       RADEON_BUF_SWAP_32BIT |
#endif
	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 16))) {
			DRM_INFO("CP reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}


/*
 * CS functions
 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}
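
/* Reading the authorization bitmap above: auth[] is an array of 32-bit
 * words in which each bit covers one dword-aligned register, so word
 * j = reg >> 7 spans 128 bytes of register space and bit (reg >> 2) & 31
 * selects the register within that word; n is the number of such words,
 * which is why out-of-range register offsets are rejected up front.
 */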

int r100_cs_parse_packet3(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  unsigned *auth, unsigned n,
			  radeon_packet3_check_t check)
{
	unsigned i, m;

	if ((pkt->opcode >> 5) > n) {
		return -EINVAL;
	}
	i = pkt->opcode >> 5;
	m = 1 << (pkt->opcode & 31);
	if (auth[i] & m) {
		return check(p, pkt);
	}
	return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header = ib_chunk->kdata[idx];

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
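
/* For reference, the decode above follows the usual CP packet header
 * layout: the packet type lives in the two top bits of the header dword
 * and the body length (count) in the bits just below it, while type-0
 * headers additionally carry the starting register offset and a
 * "write the same register repeatedly" flag; the exact bit positions are
 * hidden behind the CP_PACKET*_GET_* macros in the radeon headers.
 */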

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	volatile uint32_t *ib;
	uint32_t tmp;
	unsigned reg;
	unsigned i;
	unsigned idx;
	bool onereg;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	reg = pkt->reg;
	onereg = false;
	if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
		onereg = true;
	}
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		switch (reg) {
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
		case RADEON_DST_PITCH_OFFSET:
		case RADEON_SRC_PITCH_OFFSET:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			tmp = ib_chunk->kdata[idx] & 0x003fffff;
			tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
			ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
			break;
		case RADEON_RB3D_DEPTHOFFSET:
		case RADEON_RB3D_COLOROFFSET:
		case R300_RB3D_COLOROFFSET0:
		case R300_ZB_DEPTHOFFSET:
		case R200_PP_TXOFFSET_0:
		case R200_PP_TXOFFSET_1:
		case R200_PP_TXOFFSET_2:
		case R200_PP_TXOFFSET_3:
		case R200_PP_TXOFFSET_4:
		case R200_PP_TXOFFSET_5:
		case RADEON_PP_TXOFFSET_0:
		case RADEON_PP_TXOFFSET_1:
		case RADEON_PP_TXOFFSET_2:
		case R300_TX_OFFSET_0:
		case R300_TX_OFFSET_0+4:
		case R300_TX_OFFSET_0+8:
		case R300_TX_OFFSET_0+12:
		case R300_TX_OFFSET_0+16:
		case R300_TX_OFFSET_0+20:
		case R300_TX_OFFSET_0+24:
		case R300_TX_OFFSET_0+28:
		case R300_TX_OFFSET_0+32:
		case R300_TX_OFFSET_0+36:
		case R300_TX_OFFSET_0+40:
		case R300_TX_OFFSET_0+44:
		case R300_TX_OFFSET_0+48:
		case R300_TX_OFFSET_0+52:
		case R300_TX_OFFSET_0+56:
		case R300_TX_OFFSET_0+60:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
			break;
		default:
			/* FIXME: we don't want to allow any other register */
			break;
		}
		if (onereg) {
			/* FIXME: forbid onereg write to register on relocate */
			break;
		}
	}
	return 0;
}

static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx;
	unsigned i, c;
	volatile uint32_t *ib;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++];
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x23:
		/* FIXME: cleanup */
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case PACKET3_3D_DRAW_IMMD:
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_packet0_check(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r100_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n",
				  pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}


/*
 * Global GPU functions
 */
void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}

/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
	uint32_t crtc_gen_cntl, tmp;
	int i;

	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
		return;
	}
	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC_STATUS);
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
{
	uint32_t crtc2_gen_cntl, tmp;
	int i;

	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC2_STATUS);
		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}

int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
		if (tmp >= n) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
		       " Bad things might happen.\n");
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 31))) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 2)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r100_gpu_init(struct radeon_device *rdev)
{
	/* TODO: anything to do here ? pipes ? */
	r100_hdp_reset(rdev);
}

void r100_hdp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}

int r100_rb2d_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 26))) {
			DRM_INFO("RB2D reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}

int r100_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* TODO: reset 3D engine */
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * VRAM info
 */
static void r100_vram_get_type(struct radeon_device *rdev)
{
	uint32_t tmp;

	rdev->mc.vram_is_ddr = false;
	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.vram_is_ddr = true;
	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
		rdev->mc.vram_is_ddr = true;
	if ((rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RV100_HALF_MODE) {
			rdev->mc.vram_width = 32;
		} else {
			rdev->mc.vram_width = 64;
		}
		if (rdev->flags & RADEON_SINGLE_CRTC) {
			rdev->mc.vram_width /= 4;
			rdev->mc.vram_is_ddr = true;
		}
	} else if (rdev->family <= CHIP_RV280) {
		tmp = RREG32(RADEON_MEM_CNTL);
		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
			rdev->mc.vram_width = 128;
		} else {
			rdev->mc.vram_width = 64;
		}
	} else {
		/* newer IGPs */
		rdev->mc.vram_width = 128;
	}
}

void r100_vram_info(struct radeon_device *rdev)
{
	r100_vram_get_type(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		uint32_t tom;
		/* read NB_TOM to get the amount of ram stolen for the GPU */
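		/* NB_TOM packs the top and bottom of the stolen range in
		 * 64 KB units (top in the high 16 bits, bottom in the low
		 * 16 bits), so the size works out to
		 * ((top - bottom + 1) << 16) bytes; e.g. a register value of
		 * 0x07FF0000 decodes to a 128 MB carve-out starting at 0.
		 */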
		tom = RREG32(RADEON_NB_TOM);
		rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	} else {
		rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
		/* Some production boards of m6 will report 0
		 * if it's 8 MB
		 */
		if (rdev->mc.vram_size == 0) {
			rdev->mc.vram_size = 8192 * 1024;
			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
		}
	}

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}


/*
 * Indirect registers accessor
 */
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
	if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
		return;
	}
	(void)RREG32(RADEON_CLOCK_CNTL_DATA);
	(void)RREG32(RADEON_CRTC_GEN_CNTL);
}

static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
	/* This workaround is necessary on RV100, RS100 and RS200 chips
	 * or the chip could hang on a subsequent access
	 */
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
		udelay(5000);
	}

	/* This function is required to workaround a hardware bug in some (all?)
	 * revisions of the R300.  This workaround should be called after every
	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
	 * may not be correct.
	 */
	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
		uint32_t save, tmp;

		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
	}
}

uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t data;

	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
	r100_pll_errata_after_index(rdev);
	data = RREG32(RADEON_CLOCK_CNTL_DATA);
	r100_pll_errata_after_data(rdev);
	return data;
}

void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
	r100_pll_errata_after_index(rdev);
	WREG32(RADEON_CLOCK_CNTL_DATA, v);
	r100_pll_errata_after_data(rdev);
}

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
	if (reg < 0x10000)
		return readl(((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}
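
/* Note on the 0x10000 cutoff above: registers that fall inside the 64 KB
 * MMIO aperture are read and written directly through the rmmio mapping,
 * while anything beyond it goes through the RADEON_MM_INDEX/RADEON_MM_DATA
 * indirection pair, which is also how r100_mm_wreg below handles writes.
 */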

void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	if (reg < 0x10000)
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t reg, value;
	unsigned i;

	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	for (i = 0; i < 64; i++) {
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
	}
	return 0;
}

static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(RADEON_CP_RB_RPTR);
	wdp = RREG32(RADEON_CP_RB_WPTR);
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}


static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t csq_stat, csq2_stat, tmp;
	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
	unsigned i;

	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
	r_rptr = (csq_stat >> 0) & 0x3ff;
	r_wptr = (csq_stat >> 10) & 0x3ff;
	ib1_rptr = (csq_stat >> 20) & 0x3ff;
	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
	seq_printf(m, "Ring rptr %u\n", r_rptr);
	seq_printf(m, "Ring wptr %u\n", r_wptr);
	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
	/* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
	seq_printf(m, "Ring fifo:\n");
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
	}
	seq_printf(m, "Indirect1 fifo:\n");
	for (i = 256; i <= 512; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
	}
	seq_printf(m, "Indirect2 fifo:\n");
	for (i = 640; i < ib1_wptr; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
	}
	return 0;
}

static int r100_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_FB_LOCATION);
	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_AGP_LOCATION);
	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AGP_BASE);
	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(0x01D0);
	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_LO_ADDR);
	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_HI_ADDR);
	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
	tmp = RREG32(0x01E4);
	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list r100_debugfs_rbbm_list[] = {
	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
};

static struct drm_info_list r100_debugfs_cp_list[] = {
	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
};

static struct drm_info_list r100_debugfs_mc_info_list[] = {
	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
};
#endif

int r100_debugfs_rbbm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
#else
	return 0;
#endif
}

int r100_debugfs_cp_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
#else
	return 0;
#endif
}

int r100_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
#else
	return 0;
#endif
}