drivers/gpu/drm/radeon/r100.c
1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include "drmP.h"
31 #include "drm.h"
32 #include "radeon_drm.h"
33 #include "radeon_reg.h"
34 #include "radeon.h"
35 #include "radeon_asic.h"
36 #include "r100d.h"
37 #include "rs100d.h"
38 #include "rv200d.h"
39 #include "rv250d.h"
40 #include "atom.h"
41
42 #include <linux/firmware.h>
43 #include <linux/platform_device.h>
44
45 #include "r100_reg_safe.h"
46 #include "rn50_reg_safe.h"
47
48 /* Firmware Names */
49 #define FIRMWARE_R100 "radeon/R100_cp.bin"
50 #define FIRMWARE_R200 "radeon/R200_cp.bin"
51 #define FIRMWARE_R300 "radeon/R300_cp.bin"
52 #define FIRMWARE_R420 "radeon/R420_cp.bin"
53 #define FIRMWARE_RS690 "radeon/RS690_cp.bin"
54 #define FIRMWARE_RS600 "radeon/RS600_cp.bin"
55 #define FIRMWARE_R520 "radeon/R520_cp.bin"
56
57 MODULE_FIRMWARE(FIRMWARE_R100);
58 MODULE_FIRMWARE(FIRMWARE_R200);
59 MODULE_FIRMWARE(FIRMWARE_R300);
60 MODULE_FIRMWARE(FIRMWARE_R420);
61 MODULE_FIRMWARE(FIRMWARE_RS690);
62 MODULE_FIRMWARE(FIRMWARE_RS600);
63 MODULE_FIRMWARE(FIRMWARE_R520);
64
65 #include "r100_track.h"
66
67 /* This file gathers functions specific to:
68 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
69 */
70
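/* Pick the power state requested by the planned dynpm action (MINIMUM,
 * DOWNCLOCK, UPCLOCK or DEFAULT). Single-display-only states are skipped
 * when more than one CRTC is active, and the downclock path also avoids
 * "no display" states while CRTCs are active. The can_upclock/can_downclock
 * flags record whether a further step in that direction is possible. */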
71 void r100_pm_get_dynpm_state(struct radeon_device *rdev)
72 {
73 int i;
74 rdev->pm.dynpm_can_upclock = true;
75 rdev->pm.dynpm_can_downclock = true;
76
77 switch (rdev->pm.dynpm_planned_action) {
78 case DYNPM_ACTION_MINIMUM:
79 rdev->pm.requested_power_state_index = 0;
80 rdev->pm.dynpm_can_downclock = false;
81 break;
82 case DYNPM_ACTION_DOWNCLOCK:
83 if (rdev->pm.current_power_state_index == 0) {
84 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
85 rdev->pm.dynpm_can_downclock = false;
86 } else {
87 if (rdev->pm.active_crtc_count > 1) {
88 for (i = 0; i < rdev->pm.num_power_states; i++) {
89 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
90 continue;
91 else if (i >= rdev->pm.current_power_state_index) {
92 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
93 break;
94 } else {
95 rdev->pm.requested_power_state_index = i;
96 break;
97 }
98 }
99 } else
100 rdev->pm.requested_power_state_index =
101 rdev->pm.current_power_state_index - 1;
102 }
103 /* don't use the power state if crtcs are active and no display flag is set */
104 if ((rdev->pm.active_crtc_count > 0) &&
105 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
106 RADEON_PM_MODE_NO_DISPLAY)) {
107 rdev->pm.requested_power_state_index++;
108 }
109 break;
110 case DYNPM_ACTION_UPCLOCK:
111 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
112 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
113 rdev->pm.dynpm_can_upclock = false;
114 } else {
115 if (rdev->pm.active_crtc_count > 1) {
116 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
117 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
118 continue;
119 else if (i <= rdev->pm.current_power_state_index) {
120 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
121 break;
122 } else {
123 rdev->pm.requested_power_state_index = i;
124 break;
125 }
126 }
127 } else
128 rdev->pm.requested_power_state_index =
129 rdev->pm.current_power_state_index + 1;
130 }
131 break;
132 case DYNPM_ACTION_DEFAULT:
133 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
134 rdev->pm.dynpm_can_upclock = false;
135 break;
136 case DYNPM_ACTION_NONE:
137 default:
138 DRM_ERROR("Requested mode for not defined action\n");
139 return;
140 }
141 /* only one clock mode per power state */
142 rdev->pm.requested_clock_mode_index = 0;
143
144 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
145 rdev->pm.power_state[rdev->pm.requested_power_state_index].
146 clock_info[rdev->pm.requested_clock_mode_index].sclk,
147 rdev->pm.power_state[rdev->pm.requested_power_state_index].
148 clock_info[rdev->pm.requested_clock_mode_index].mclk,
149 rdev->pm.power_state[rdev->pm.requested_power_state_index].
150 pcie_lanes);
151 }
152
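/* Fill in the power-management profile table: for each profile (default,
 * low/mid/high single-head "sh" and multi-head "mh") set the power state
 * and clock mode indices used with DPMS on and off. On these chips every
 * profile points at either state 0 or the default state, clock mode 0. */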
153 void r100_pm_init_profile(struct radeon_device *rdev)
154 {
155 /* default */
156 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
157 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
158 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
159 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
160 /* low sh */
161 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
162 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
163 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
164 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
165 /* mid sh */
166 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
167 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
168 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
169 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
170 /* high sh */
171 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
172 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
173 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
174 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
175 /* low mh */
176 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
177 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
178 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
179 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
180 /* mid mh */
181 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
182 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
183 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
184 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
185 /* high mh */
186 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
187 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
188 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
189 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
190 }
191
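/* Apply the miscellaneous settings of the requested power state: toggle the
 * voltage GPIO when voltage drop is supported, program the reduced-speed
 * SCLK and dynamic voltage-drop bits in the SCLK PLL registers, force or
 * release the HDP clock, and reconfigure the PCIe lane count when it differs
 * from the current state (PCIe, non-IGP chips only). */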
192 void r100_pm_misc(struct radeon_device *rdev)
193 {
194 int requested_index = rdev->pm.requested_power_state_index;
195 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
196 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
197 u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
198
199 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
200 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
201 tmp = RREG32(voltage->gpio.reg);
202 if (voltage->active_high)
203 tmp |= voltage->gpio.mask;
204 else
205 tmp &= ~(voltage->gpio.mask);
206 WREG32(voltage->gpio.reg, tmp);
207 if (voltage->delay)
208 udelay(voltage->delay);
209 } else {
210 tmp = RREG32(voltage->gpio.reg);
211 if (voltage->active_high)
212 tmp &= ~voltage->gpio.mask;
213 else
214 tmp |= voltage->gpio.mask;
215 WREG32(voltage->gpio.reg, tmp);
216 if (voltage->delay)
217 udelay(voltage->delay);
218 }
219 }
220
221 sclk_cntl = RREG32_PLL(SCLK_CNTL);
222 sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
223 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
224 sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
225 sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
226 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
227 sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
228 if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
229 sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
230 else
231 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
232 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
233 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
234 else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
235 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
236 } else
237 sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
238
239 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
240 sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
241 if (voltage->delay) {
242 sclk_more_cntl |= VOLTAGE_DROP_SYNC;
243 switch (voltage->delay) {
244 case 33:
245 sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
246 break;
247 case 66:
248 sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
249 break;
250 case 99:
251 sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
252 break;
253 case 132:
254 sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
255 break;
256 }
257 } else
258 sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
259 } else
260 sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
261
262 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
263 sclk_cntl &= ~FORCE_HDP;
264 else
265 sclk_cntl |= FORCE_HDP;
266
267 WREG32_PLL(SCLK_CNTL, sclk_cntl);
268 WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
269 WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
270
271 /* set pcie lanes */
272 if ((rdev->flags & RADEON_IS_PCIE) &&
273 !(rdev->flags & RADEON_IS_IGP) &&
274 rdev->asic->set_pcie_lanes &&
275 (ps->pcie_lanes !=
276 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
277 radeon_set_pcie_lanes(rdev,
278 ps->pcie_lanes);
279 DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
280 }
281 }
282
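/* Stall display memory requests on all enabled CRTCs (by setting the
 * DISP_REQ_EN_B bits) before a reclock; r100_pm_finish() below clears the
 * bits again afterwards. */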
283 void r100_pm_prepare(struct radeon_device *rdev)
284 {
285 struct drm_device *ddev = rdev->ddev;
286 struct drm_crtc *crtc;
287 struct radeon_crtc *radeon_crtc;
288 u32 tmp;
289
290 /* disable any active CRTCs */
291 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
292 radeon_crtc = to_radeon_crtc(crtc);
293 if (radeon_crtc->enabled) {
294 if (radeon_crtc->crtc_id) {
295 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
296 tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
297 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
298 } else {
299 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
300 tmp |= RADEON_CRTC_DISP_REQ_EN_B;
301 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
302 }
303 }
304 }
305 }
306
307 void r100_pm_finish(struct radeon_device *rdev)
308 {
309 struct drm_device *ddev = rdev->ddev;
310 struct drm_crtc *crtc;
311 struct radeon_crtc *radeon_crtc;
312 u32 tmp;
313
314 /* enable any active CRTCs */
315 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
316 radeon_crtc = to_radeon_crtc(crtc);
317 if (radeon_crtc->enabled) {
318 if (radeon_crtc->crtc_id) {
319 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
320 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
321 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
322 } else {
323 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
324 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
325 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
326 }
327 }
328 }
329 }
330
331 bool r100_gui_idle(struct radeon_device *rdev)
332 {
333 if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
334 return false;
335 else
336 return true;
337 }
338
339 /* hpd for digital panel detect/disconnect */
340 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
341 {
342 bool connected = false;
343
344 switch (hpd) {
345 case RADEON_HPD_1:
346 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
347 connected = true;
348 break;
349 case RADEON_HPD_2:
350 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
351 connected = true;
352 break;
353 default:
354 break;
355 }
356 return connected;
357 }
358
359 void r100_hpd_set_polarity(struct radeon_device *rdev,
360 enum radeon_hpd_id hpd)
361 {
362 u32 tmp;
363 bool connected = r100_hpd_sense(rdev, hpd);
364
365 switch (hpd) {
366 case RADEON_HPD_1:
367 tmp = RREG32(RADEON_FP_GEN_CNTL);
368 if (connected)
369 tmp &= ~RADEON_FP_DETECT_INT_POL;
370 else
371 tmp |= RADEON_FP_DETECT_INT_POL;
372 WREG32(RADEON_FP_GEN_CNTL, tmp);
373 break;
374 case RADEON_HPD_2:
375 tmp = RREG32(RADEON_FP2_GEN_CNTL);
376 if (connected)
377 tmp &= ~RADEON_FP2_DETECT_INT_POL;
378 else
379 tmp |= RADEON_FP2_DETECT_INT_POL;
380 WREG32(RADEON_FP2_GEN_CNTL, tmp);
381 break;
382 default:
383 break;
384 }
385 }
386
387 void r100_hpd_init(struct radeon_device *rdev)
388 {
389 struct drm_device *dev = rdev->ddev;
390 struct drm_connector *connector;
391
392 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
393 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
394 switch (radeon_connector->hpd.hpd) {
395 case RADEON_HPD_1:
396 rdev->irq.hpd[0] = true;
397 break;
398 case RADEON_HPD_2:
399 rdev->irq.hpd[1] = true;
400 break;
401 default:
402 break;
403 }
404 }
405 if (rdev->irq.installed)
406 r100_irq_set(rdev);
407 }
408
409 void r100_hpd_fini(struct radeon_device *rdev)
410 {
411 struct drm_device *dev = rdev->ddev;
412 struct drm_connector *connector;
413
414 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
415 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
416 switch (radeon_connector->hpd.hpd) {
417 case RADEON_HPD_1:
418 rdev->irq.hpd[0] = false;
419 break;
420 case RADEON_HPD_2:
421 rdev->irq.hpd[1] = false;
422 break;
423 default:
424 break;
425 }
426 }
427 }
428
429 /*
430 * PCI GART
431 */
432 void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
433 {
434 /* TODO: is there anything we can do here? */
435 /* The hardware seems to cache only one entry, so we should discard
436 * that entry; otherwise, if the first GPU GART read hits it, it
437 * could end up at the wrong address. */
438 }
439
440 int r100_pci_gart_init(struct radeon_device *rdev)
441 {
442 int r;
443
444 if (rdev->gart.table.ram.ptr) {
445 WARN(1, "R100 PCI GART already initialized.\n");
446 return 0;
447 }
448 /* Initialize common gart structure */
449 r = radeon_gart_init(rdev);
450 if (r)
451 return r;
452 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
453 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
454 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
455 return radeon_gart_table_ram_alloc(rdev);
456 }
457
458 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
459 void r100_enable_bm(struct radeon_device *rdev)
460 {
461 uint32_t tmp;
462 /* Enable bus mastering */
463 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
464 WREG32(RADEON_BUS_CNTL, tmp);
465 }
466
467 int r100_pci_gart_enable(struct radeon_device *rdev)
468 {
469 uint32_t tmp;
470
471 radeon_gart_restore(rdev);
472 /* discard memory request outside of configured range */
473 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
474 WREG32(RADEON_AIC_CNTL, tmp);
475 /* set address range for PCI address translate */
476 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
477 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
478 /* set PCI GART page-table base address */
479 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
480 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
481 WREG32(RADEON_AIC_CNTL, tmp);
482 r100_pci_gart_tlb_flush(rdev);
483 rdev->gart.ready = true;
484 return 0;
485 }
486
487 void r100_pci_gart_disable(struct radeon_device *rdev)
488 {
489 uint32_t tmp;
490
491 /* discard memory request outside of configured range */
492 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
493 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
494 WREG32(RADEON_AIC_LO_ADDR, 0);
495 WREG32(RADEON_AIC_HI_ADDR, 0);
496 }
497
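/* Write one PCI GART table entry: store the low 32 bits of the DMA address
 * for page i as a little-endian dword in the table kept in system RAM. */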
498 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
499 {
500 if (i < 0 || i > rdev->gart.num_gpu_pages) {
501 return -EINVAL;
502 }
503 rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
504 return 0;
505 }
506
507 void r100_pci_gart_fini(struct radeon_device *rdev)
508 {
509 radeon_gart_fini(rdev);
510 r100_pci_gart_disable(rdev);
511 radeon_gart_table_ram_free(rdev);
512 }
513
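/* Build the GEN_INT_CNTL mask from the currently enabled interrupt sources
 * (CP software interrupt, GUI idle, CRTC1/CRTC2 vblank and the two hot-plug
 * detect lines) and program it, or clear it if no handler is installed. */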
514 int r100_irq_set(struct radeon_device *rdev)
515 {
516 uint32_t tmp = 0;
517
518 if (!rdev->irq.installed) {
519 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
520 WREG32(R_000040_GEN_INT_CNTL, 0);
521 return -EINVAL;
522 }
523 if (rdev->irq.sw_int) {
524 tmp |= RADEON_SW_INT_ENABLE;
525 }
526 if (rdev->irq.gui_idle) {
527 tmp |= RADEON_GUI_IDLE_MASK;
528 }
529 if (rdev->irq.crtc_vblank_int[0]) {
530 tmp |= RADEON_CRTC_VBLANK_MASK;
531 }
532 if (rdev->irq.crtc_vblank_int[1]) {
533 tmp |= RADEON_CRTC2_VBLANK_MASK;
534 }
535 if (rdev->irq.hpd[0]) {
536 tmp |= RADEON_FP_DETECT_MASK;
537 }
538 if (rdev->irq.hpd[1]) {
539 tmp |= RADEON_FP2_DETECT_MASK;
540 }
541 WREG32(RADEON_GEN_INT_CNTL, tmp);
542 return 0;
543 }
544
545 void r100_irq_disable(struct radeon_device *rdev)
546 {
547 u32 tmp;
548
549 WREG32(R_000040_GEN_INT_CNTL, 0);
550 /* Wait and acknowledge irq */
551 mdelay(1);
552 tmp = RREG32(R_000044_GEN_INT_STATUS);
553 WREG32(R_000044_GEN_INT_STATUS, tmp);
554 }
555
556 static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
557 {
558 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
559 uint32_t irq_mask = RADEON_SW_INT_TEST |
560 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
561 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
562
563 /* the interrupt works, but the status bit is permanently asserted */
564 if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
565 if (!rdev->irq.gui_idle_acked)
566 irq_mask |= RADEON_GUI_IDLE_STAT;
567 }
568
569 if (irqs) {
570 WREG32(RADEON_GEN_INT_STATUS, irqs);
571 }
572 return irqs & irq_mask;
573 }
574
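/* Interrupt handler body: acknowledge pending sources and loop until none
 * are left, processing fences (SW interrupt), the GUI idle event, vblanks
 * and hot-plug detects; then queue the hotplug work if needed and re-arm
 * MSI on chips that require it. */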
575 int r100_irq_process(struct radeon_device *rdev)
576 {
577 uint32_t status, msi_rearm;
578 bool queue_hotplug = false;
579
580 /* reset gui idle ack. the status bit is broken */
581 rdev->irq.gui_idle_acked = false;
582
583 status = r100_irq_ack(rdev);
584 if (!status) {
585 return IRQ_NONE;
586 }
587 if (rdev->shutdown) {
588 return IRQ_NONE;
589 }
590 while (status) {
591 /* SW interrupt */
592 if (status & RADEON_SW_INT_TEST) {
593 radeon_fence_process(rdev);
594 }
595 /* gui idle interrupt */
596 if (status & RADEON_GUI_IDLE_STAT) {
597 rdev->irq.gui_idle_acked = true;
598 rdev->pm.gui_idle = true;
599 wake_up(&rdev->irq.idle_queue);
600 }
601 /* Vertical blank interrupts */
602 if (status & RADEON_CRTC_VBLANK_STAT) {
603 drm_handle_vblank(rdev->ddev, 0);
604 rdev->pm.vblank_sync = true;
605 wake_up(&rdev->irq.vblank_queue);
606 }
607 if (status & RADEON_CRTC2_VBLANK_STAT) {
608 drm_handle_vblank(rdev->ddev, 1);
609 rdev->pm.vblank_sync = true;
610 wake_up(&rdev->irq.vblank_queue);
611 }
612 if (status & RADEON_FP_DETECT_STAT) {
613 queue_hotplug = true;
614 DRM_DEBUG("HPD1\n");
615 }
616 if (status & RADEON_FP2_DETECT_STAT) {
617 queue_hotplug = true;
618 DRM_DEBUG("HPD2\n");
619 }
620 status = r100_irq_ack(rdev);
621 }
622 /* reset gui idle ack. the status bit is broken */
623 rdev->irq.gui_idle_acked = false;
624 if (queue_hotplug)
625 queue_work(rdev->wq, &rdev->hotplug_work);
626 if (rdev->msi_enabled) {
627 switch (rdev->family) {
628 case CHIP_RS400:
629 case CHIP_RS480:
630 msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
631 WREG32(RADEON_AIC_CNTL, msi_rearm);
632 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
633 break;
634 default:
635 msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
636 WREG32(RADEON_MSI_REARM_EN, msi_rearm);
637 WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
638 break;
639 }
640 }
641 return IRQ_HANDLED;
642 }
643
644 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
645 {
646 if (crtc == 0)
647 return RREG32(RADEON_CRTC_CRNT_FRAME);
648 else
649 return RREG32(RADEON_CRTC2_CRNT_FRAME);
650 }
651
652 /* Whoever calls radeon_fence_emit should call ring_lock and ask
653 * for enough space (today the callers are IB scheduling and buffer moves) */
654 void r100_fence_ring_emit(struct radeon_device *rdev,
655 struct radeon_fence *fence)
656 {
657 /* We have to make sure that the caches are flushed before the
658 * CPU might read something from VRAM. */
659 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
660 radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
661 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
662 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
663 /* Wait until IDLE & CLEAN */
664 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
665 radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
666 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
667 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
668 RADEON_HDP_READ_BUFFER_INVALIDATE);
669 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
670 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
671 /* Emit fence sequence & fire IRQ */
672 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
673 radeon_ring_write(rdev, fence->seq);
674 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
675 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
676 }
677
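/* Set up the scratch/write-back page: allocate and pin a GTT buffer object
 * on first use, map it, and point the scratch and CP_RB_RPTR addresses at
 * it so the CP can write fence values and the read pointer to system RAM. */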
678 int r100_wb_init(struct radeon_device *rdev)
679 {
680 int r;
681
682 if (rdev->wb.wb_obj == NULL) {
683 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
684 RADEON_GEM_DOMAIN_GTT,
685 &rdev->wb.wb_obj);
686 if (r) {
687 dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
688 return r;
689 }
690 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
691 if (unlikely(r != 0))
692 return r;
693 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
694 &rdev->wb.gpu_addr);
695 if (r) {
696 dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
697 radeon_bo_unreserve(rdev->wb.wb_obj);
698 return r;
699 }
700 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
701 radeon_bo_unreserve(rdev->wb.wb_obj);
702 if (r) {
703 dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
704 return r;
705 }
706 }
707 WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
708 WREG32(R_00070C_CP_RB_RPTR_ADDR,
709 S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
710 WREG32(R_000770_SCRATCH_UMSK, 0xff);
711 return 0;
712 }
713
714 void r100_wb_disable(struct radeon_device *rdev)
715 {
716 WREG32(R_000770_SCRATCH_UMSK, 0);
717 }
718
719 void r100_wb_fini(struct radeon_device *rdev)
720 {
721 int r;
722
723 r100_wb_disable(rdev);
724 if (rdev->wb.wb_obj) {
725 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
726 if (unlikely(r != 0)) {
727 dev_err(rdev->dev, "(%d) can't finish WB\n", r);
728 return;
729 }
730 radeon_bo_kunmap(rdev->wb.wb_obj);
731 radeon_bo_unpin(rdev->wb.wb_obj);
732 radeon_bo_unreserve(rdev->wb.wb_obj);
733 radeon_bo_unref(&rdev->wb.wb_obj);
734 rdev->wb.wb = NULL;
735 rdev->wb.wb_obj = NULL;
736 }
737 }
738
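/* Copy num_pages pages with the 2D blitter: the copy is split into chunks
 * of at most 8191 pages, each emitted as a BITBLT_MULTI packet, followed by
 * a destination cache flush, an idle wait and, if a fence was passed in, a
 * fence emit. */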
739 int r100_copy_blit(struct radeon_device *rdev,
740 uint64_t src_offset,
741 uint64_t dst_offset,
742 unsigned num_pages,
743 struct radeon_fence *fence)
744 {
745 uint32_t cur_pages;
746 uint32_t stride_bytes = PAGE_SIZE;
747 uint32_t pitch;
748 uint32_t stride_pixels;
749 unsigned ndw;
750 int num_loops;
751 int r = 0;
752
753 /* radeon limited to 16k stride */
754 stride_bytes &= 0x3fff;
755 /* radeon pitch is /64 */
756 pitch = stride_bytes / 64;
757 stride_pixels = stride_bytes / 4;
758 num_loops = DIV_ROUND_UP(num_pages, 8191);
759
760 /* Ask for enough room for blit + flush + fence */
761 ndw = 64 + (10 * num_loops);
762 r = radeon_ring_lock(rdev, ndw);
763 if (r) {
764 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
765 return -EINVAL;
766 }
767 while (num_pages > 0) {
768 cur_pages = num_pages;
769 if (cur_pages > 8191) {
770 cur_pages = 8191;
771 }
772 num_pages -= cur_pages;
773
774 /* pages run in the Y direction (height);
775 * page width runs in the X direction (width) */
776 radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
777 radeon_ring_write(rdev,
778 RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
779 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
780 RADEON_GMC_SRC_CLIPPING |
781 RADEON_GMC_DST_CLIPPING |
782 RADEON_GMC_BRUSH_NONE |
783 (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
784 RADEON_GMC_SRC_DATATYPE_COLOR |
785 RADEON_ROP3_S |
786 RADEON_DP_SRC_SOURCE_MEMORY |
787 RADEON_GMC_CLR_CMP_CNTL_DIS |
788 RADEON_GMC_WR_MSK_DIS);
789 radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
790 radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
791 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
792 radeon_ring_write(rdev, 0);
793 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
794 radeon_ring_write(rdev, num_pages);
795 radeon_ring_write(rdev, num_pages);
796 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
797 }
798 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
799 radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
800 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
801 radeon_ring_write(rdev,
802 RADEON_WAIT_2D_IDLECLEAN |
803 RADEON_WAIT_HOST_IDLECLEAN |
804 RADEON_WAIT_DMA_GUI_IDLE);
805 if (fence) {
806 r = radeon_fence_emit(rdev, fence);
807 }
808 radeon_ring_unlock_commit(rdev);
809 return r;
810 }
811
812 static int r100_cp_wait_for_idle(struct radeon_device *rdev)
813 {
814 unsigned i;
815 u32 tmp;
816
817 for (i = 0; i < rdev->usec_timeout; i++) {
818 tmp = RREG32(R_000E40_RBBM_STATUS);
819 if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
820 return 0;
821 }
822 udelay(1);
823 }
824 return -1;
825 }
826
827 void r100_ring_start(struct radeon_device *rdev)
828 {
829 int r;
830
831 r = radeon_ring_lock(rdev, 2);
832 if (r) {
833 return;
834 }
835 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
836 radeon_ring_write(rdev,
837 RADEON_ISYNC_ANY2D_IDLE3D |
838 RADEON_ISYNC_ANY3D_IDLE2D |
839 RADEON_ISYNC_WAIT_IDLEGUI |
840 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
841 radeon_ring_unlock_commit(rdev);
842 }
843
844
845 /* Load the microcode for the CP */
846 static int r100_cp_init_microcode(struct radeon_device *rdev)
847 {
848 struct platform_device *pdev;
849 const char *fw_name = NULL;
850 int err;
851
852 DRM_DEBUG_KMS("\n");
853
854 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
855 err = IS_ERR(pdev);
856 if (err) {
857 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
858 return -EINVAL;
859 }
860 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
861 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
862 (rdev->family == CHIP_RS200)) {
863 DRM_INFO("Loading R100 Microcode\n");
864 fw_name = FIRMWARE_R100;
865 } else if ((rdev->family == CHIP_R200) ||
866 (rdev->family == CHIP_RV250) ||
867 (rdev->family == CHIP_RV280) ||
868 (rdev->family == CHIP_RS300)) {
869 DRM_INFO("Loading R200 Microcode\n");
870 fw_name = FIRMWARE_R200;
871 } else if ((rdev->family == CHIP_R300) ||
872 (rdev->family == CHIP_R350) ||
873 (rdev->family == CHIP_RV350) ||
874 (rdev->family == CHIP_RV380) ||
875 (rdev->family == CHIP_RS400) ||
876 (rdev->family == CHIP_RS480)) {
877 DRM_INFO("Loading R300 Microcode\n");
878 fw_name = FIRMWARE_R300;
879 } else if ((rdev->family == CHIP_R420) ||
880 (rdev->family == CHIP_R423) ||
881 (rdev->family == CHIP_RV410)) {
882 DRM_INFO("Loading R400 Microcode\n");
883 fw_name = FIRMWARE_R420;
884 } else if ((rdev->family == CHIP_RS690) ||
885 (rdev->family == CHIP_RS740)) {
886 DRM_INFO("Loading RS690/RS740 Microcode\n");
887 fw_name = FIRMWARE_RS690;
888 } else if (rdev->family == CHIP_RS600) {
889 DRM_INFO("Loading RS600 Microcode\n");
890 fw_name = FIRMWARE_RS600;
891 } else if ((rdev->family == CHIP_RV515) ||
892 (rdev->family == CHIP_R520) ||
893 (rdev->family == CHIP_RV530) ||
894 (rdev->family == CHIP_R580) ||
895 (rdev->family == CHIP_RV560) ||
896 (rdev->family == CHIP_RV570)) {
897 DRM_INFO("Loading R500 Microcode\n");
898 fw_name = FIRMWARE_R520;
899 }
900
901 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
902 platform_device_unregister(pdev);
903 if (err) {
904 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
905 fw_name);
906 } else if (rdev->me_fw->size % 8) {
907 printk(KERN_ERR
908 "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
909 rdev->me_fw->size, fw_name);
910 err = -EINVAL;
911 release_firmware(rdev->me_fw);
912 rdev->me_fw = NULL;
913 }
914 return err;
915 }
916
917 static void r100_cp_load_microcode(struct radeon_device *rdev)
918 {
919 const __be32 *fw_data;
920 int i, size;
921
922 if (r100_gui_wait_for_idle(rdev)) {
923 printk(KERN_WARNING "Failed to wait GUI idle while "
924 "programming pipes. Bad things might happen.\n");
925 }
926
927 if (rdev->me_fw) {
928 size = rdev->me_fw->size / 4;
929 fw_data = (const __be32 *)&rdev->me_fw->data[0];
930 WREG32(RADEON_CP_ME_RAM_ADDR, 0);
931 for (i = 0; i < size; i += 2) {
932 WREG32(RADEON_CP_ME_RAM_DATAH,
933 be32_to_cpup(&fw_data[i]));
934 WREG32(RADEON_CP_ME_RAM_DATAL,
935 be32_to_cpup(&fw_data[i + 1]));
936 }
937 }
938 }
939
940 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
941 {
942 unsigned rb_bufsz;
943 unsigned rb_blksz;
944 unsigned max_fetch;
945 unsigned pre_write_timer;
946 unsigned pre_write_limit;
947 unsigned indirect2_start;
948 unsigned indirect1_start;
949 uint32_t tmp;
950 int r;
951
952 if (r100_debugfs_cp_init(rdev)) {
953 DRM_ERROR("Failed to register debugfs file for CP !\n");
954 }
955 if (!rdev->me_fw) {
956 r = r100_cp_init_microcode(rdev);
957 if (r) {
958 DRM_ERROR("Failed to load firmware!\n");
959 return r;
960 }
961 }
962
963 /* Align ring size */
964 rb_bufsz = drm_order(ring_size / 8);
965 ring_size = (1 << (rb_bufsz + 1)) * 4;
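/* Worked example of the alignment above (assuming drm_order() returns the
 * smallest order with (1 << order) >= size): a requested ring_size of 1 MiB
 * gives rb_bufsz = drm_order(1048576 / 8) = 17, so ring_size becomes
 * (1 << 18) * 4 = 1 MiB again, i.e. a power-of-two dword count that the
 * RB_BUFSZ field programmed below can encode. */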
966 r100_cp_load_microcode(rdev);
967 r = radeon_ring_init(rdev, ring_size);
968 if (r) {
969 return r;
970 }
971 /* Each time the CP reads 1024 bytes (16 dword/quadword), update
972 * the rptr copy in system RAM */
973 rb_blksz = 9;
974 /* the CP will read 128 bytes at a time (4 dwords) */
975 max_fetch = 1;
976 rdev->cp.align_mask = 16 - 1;
977 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
978 pre_write_timer = 64;
979 /* Force CP_RB_WPTR write if written more than one time before the
980 * delay expire
981 */
982 pre_write_limit = 0;
983 /* Setup the cp cache like this (cache size is 96 dwords) :
984 * RING 0 to 15
985 * INDIRECT1 16 to 79
986 * INDIRECT2 80 to 95
987 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
988 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
989 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
990 * Idea being that most of the gpu cmd will be through indirect1 buffer
991 * so it gets the bigger cache.
992 */
993 indirect2_start = 80;
994 indirect1_start = 16;
995 /* cp setup */
996 WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
997 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
998 REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
999 REG_SET(RADEON_MAX_FETCH, max_fetch) |
1000 RADEON_RB_NO_UPDATE);
1001 #ifdef __BIG_ENDIAN
1002 tmp |= RADEON_BUF_SWAP_32BIT;
1003 #endif
1004 WREG32(RADEON_CP_RB_CNTL, tmp);
1005
1006 /* Set ring address */
1007 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
1008 WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
1009 /* Force read & write ptr to 0 */
1010 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
1011 WREG32(RADEON_CP_RB_RPTR_WR, 0);
1012 WREG32(RADEON_CP_RB_WPTR, 0);
1013 WREG32(RADEON_CP_RB_CNTL, tmp);
1014 udelay(10);
1015 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
1016 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
1017 /* protect against crazy HW on resume */
1018 rdev->cp.wptr &= rdev->cp.ptr_mask;
1019 /* Set cp mode to bus mastering & enable cp */
1020 WREG32(RADEON_CP_CSQ_MODE,
1021 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
1022 REG_SET(RADEON_INDIRECT1_START, indirect1_start));
1023 WREG32(0x718, 0);
1024 WREG32(0x744, 0x00004D4D);
1025 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
1026 radeon_ring_start(rdev);
1027 r = radeon_ring_test(rdev);
1028 if (r) {
1029 DRM_ERROR("radeon: cp isn't working (%d).\n", r);
1030 return r;
1031 }
1032 rdev->cp.ready = true;
1033 return 0;
1034 }
1035
1036 void r100_cp_fini(struct radeon_device *rdev)
1037 {
1038 if (r100_cp_wait_for_idle(rdev)) {
1039 DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
1040 }
1041 /* Disable ring */
1042 r100_cp_disable(rdev);
1043 radeon_ring_fini(rdev);
1044 DRM_INFO("radeon: cp finalized\n");
1045 }
1046
1047 void r100_cp_disable(struct radeon_device *rdev)
1048 {
1049 /* Disable ring */
1050 rdev->cp.ready = false;
1051 WREG32(RADEON_CP_CSQ_MODE, 0);
1052 WREG32(RADEON_CP_CSQ_CNTL, 0);
1053 if (r100_gui_wait_for_idle(rdev)) {
1054 printk(KERN_WARNING "Failed to wait GUI idle while "
1055 "programming pipes. Bad things might happen.\n");
1056 }
1057 }
1058
1059 void r100_cp_commit(struct radeon_device *rdev)
1060 {
1061 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
1062 (void)RREG32(RADEON_CP_RB_WPTR);
1063 }
1064
1065
1066 /*
1067 * CS functions
1068 */
1069 int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1070 struct radeon_cs_packet *pkt,
1071 const unsigned *auth, unsigned n,
1072 radeon_packet0_check_t check)
1073 {
1074 unsigned reg;
1075 unsigned i, j, m;
1076 unsigned idx;
1077 int r;
1078
1079 idx = pkt->idx + 1;
1080 reg = pkt->reg;
1081 /* Check that the register falls into the register range
1082 * determined by the number of entries (n) in the
1083 * safe register bitmap.
1084 */
1085 if (pkt->one_reg_wr) {
1086 if ((reg >> 7) > n) {
1087 return -EINVAL;
1088 }
1089 } else {
1090 if (((reg + (pkt->count << 2)) >> 7) > n) {
1091 return -EINVAL;
1092 }
1093 }
1094 for (i = 0; i <= pkt->count; i++, idx++) {
1095 j = (reg >> 7);
1096 m = 1 << ((reg >> 2) & 31);
1097 if (auth[j] & m) {
1098 r = check(p, pkt, idx, reg);
1099 if (r) {
1100 return r;
1101 }
1102 }
1103 if (pkt->one_reg_wr) {
1104 if (!(auth[j] & m)) {
1105 break;
1106 }
1107 } else {
1108 reg += 4;
1109 }
1110 }
1111 return 0;
1112 }
1113
1114 void r100_cs_dump_packet(struct radeon_cs_parser *p,
1115 struct radeon_cs_packet *pkt)
1116 {
1117 volatile uint32_t *ib;
1118 unsigned i;
1119 unsigned idx;
1120
1121 ib = p->ib->ptr;
1122 idx = pkt->idx;
1123 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
1124 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
1125 }
1126 }
1127
1128 /**
1129 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
1130 * @parser: parser structure holding parsing context.
1131 * @pkt: where to store packet information
1132 *
1133 * Assumes that chunk_ib_index is properly set. Returns -EINVAL
1134 * if the packet is bigger than the remaining IB size or if the packet type is unknown.
1135 **/
1136 int r100_cs_packet_parse(struct radeon_cs_parser *p,
1137 struct radeon_cs_packet *pkt,
1138 unsigned idx)
1139 {
1140 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
1141 uint32_t header;
1142
1143 if (idx >= ib_chunk->length_dw) {
1144 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
1145 idx, ib_chunk->length_dw);
1146 return -EINVAL;
1147 }
1148 header = radeon_get_ib_value(p, idx);
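/* The 32-bit header encodes the packet type and dword count; for type-0
 * packets it also carries the start register and the one-reg-write flag,
 * all extracted with the CP_PACKET*_GET_* macros below. */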
1149 pkt->idx = idx;
1150 pkt->type = CP_PACKET_GET_TYPE(header);
1151 pkt->count = CP_PACKET_GET_COUNT(header);
1152 switch (pkt->type) {
1153 case PACKET_TYPE0:
1154 pkt->reg = CP_PACKET0_GET_REG(header);
1155 pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
1156 break;
1157 case PACKET_TYPE3:
1158 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
1159 break;
1160 case PACKET_TYPE2:
1161 pkt->count = -1;
1162 break;
1163 default:
1164 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
1165 return -EINVAL;
1166 }
1167 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
1168 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
1169 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
1170 return -EINVAL;
1171 }
1172 return 0;
1173 }
1174
1175 /**
1176 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
1177 * @parser: parser structure holding parsing context.
1178 *
1179 * Userspace sends a special sequence for VLINE waits.
1180 * PACKET0 - VLINE_START_END + value
1181 * PACKET0 - WAIT_UNTIL + value
1182 * RELOC (P3) - crtc_id in reloc.
1183 *
1184 * This function parses this and relocates the VLINE START END
1185 * and WAIT UNTIL packets to the correct crtc.
1186 * It also detects a switched off crtc and nulls out the
1187 * wait in that case.
1188 */
1189 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1190 {
1191 struct drm_mode_object *obj;
1192 struct drm_crtc *crtc;
1193 struct radeon_crtc *radeon_crtc;
1194 struct radeon_cs_packet p3reloc, waitreloc;
1195 int crtc_id;
1196 int r;
1197 uint32_t header, h_idx, reg;
1198 volatile uint32_t *ib;
1199
1200 ib = p->ib->ptr;
1201
1202 /* parse the wait until */
1203 r = r100_cs_packet_parse(p, &waitreloc, p->idx);
1204 if (r)
1205 return r;
1206
1207 /* check it's a wait-until with only 1 count */
1208 if (waitreloc.reg != RADEON_WAIT_UNTIL ||
1209 waitreloc.count != 0) {
1210 DRM_ERROR("vline wait had illegal wait until segment\n");
1211 r = -EINVAL;
1212 return r;
1213 }
1214
1215 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
1216 DRM_ERROR("vline wait had illegal wait until\n");
1217 r = -EINVAL;
1218 return r;
1219 }
1220
1221 /* jump over the NOP */
1222 r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
1223 if (r)
1224 return r;
1225
1226 h_idx = p->idx - 2;
1227 p->idx += waitreloc.count + 2;
1228 p->idx += p3reloc.count + 2;
1229
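/* Per the sequence described above, h_idx now points back at the
 * VLINE_START_END PACKET0 header (two dwords before the WAIT_UNTIL packet);
 * the crtc_id supplied by userspace sits five dwords after it, inside the
 * NOP relocation packet. */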
1230 header = radeon_get_ib_value(p, h_idx);
1231 crtc_id = radeon_get_ib_value(p, h_idx + 5);
1232 reg = CP_PACKET0_GET_REG(header);
1233 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1234 if (!obj) {
1235 DRM_ERROR("cannot find crtc %d\n", crtc_id);
1236 r = -EINVAL;
1237 goto out;
1238 }
1239 crtc = obj_to_crtc(obj);
1240 radeon_crtc = to_radeon_crtc(crtc);
1241 crtc_id = radeon_crtc->crtc_id;
1242
1243 if (!crtc->enabled) {
1244 /* if the CRTC isn't enabled - we need to nop out the wait until */
1245 ib[h_idx + 2] = PACKET2(0);
1246 ib[h_idx + 3] = PACKET2(0);
1247 } else if (crtc_id == 1) {
1248 switch (reg) {
1249 case AVIVO_D1MODE_VLINE_START_END:
1250 header &= ~R300_CP_PACKET0_REG_MASK;
1251 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1252 break;
1253 case RADEON_CRTC_GUI_TRIG_VLINE:
1254 header &= ~R300_CP_PACKET0_REG_MASK;
1255 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
1256 break;
1257 default:
1258 DRM_ERROR("unknown crtc reloc\n");
1259 r = -EINVAL;
1260 goto out;
1261 }
1262 ib[h_idx] = header;
1263 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
1264 }
1265 out:
1266 return r;
1267 }
1268
1269 /**
1270 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
1271 * @parser: parser structure holding parsing context.
1272 * @data: pointer to relocation data
1273 * @offset_start: starting offset
1274 * @offset_mask: offset mask (to align start offset on)
1275 * @reloc: reloc information
1276 *
1277 * Checks that the next packet is a relocation packet3, does BO validation and computes
1278 * the GPU offset using the provided start.
1279 **/
1280 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1281 struct radeon_cs_reloc **cs_reloc)
1282 {
1283 struct radeon_cs_chunk *relocs_chunk;
1284 struct radeon_cs_packet p3reloc;
1285 unsigned idx;
1286 int r;
1287
1288 if (p->chunk_relocs_idx == -1) {
1289 DRM_ERROR("No relocation chunk !\n");
1290 return -EINVAL;
1291 }
1292 *cs_reloc = NULL;
1293 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
1294 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
1295 if (r) {
1296 return r;
1297 }
1298 p->idx += p3reloc.count + 2;
1299 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
1300 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
1301 p3reloc.idx);
1302 r100_cs_dump_packet(p, &p3reloc);
1303 return -EINVAL;
1304 }
1305 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
1306 if (idx >= relocs_chunk->length_dw) {
1307 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
1308 idx, relocs_chunk->length_dw);
1309 r100_cs_dump_packet(p, &p3reloc);
1310 return -EINVAL;
1311 }
1312 /* FIXME: we assume reloc size is 4 dwords */
1313 *cs_reloc = p->relocs_ptr[(idx / 4)];
1314 return 0;
1315 }
1316
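/* Compute the per-vertex size in dwords from an SE_VTX_FMT value by walking
 * the format bits in the order the spec lists them (base XY plus optional
 * W/Z, colors, specular, fog, texture coordinate sets, blend weights, ...). */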
1317 static int r100_get_vtx_size(uint32_t vtx_fmt)
1318 {
1319 int vtx_size;
1320 vtx_size = 2;
1321 /* ordered according to bits in spec */
1322 if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
1323 vtx_size++;
1324 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
1325 vtx_size += 3;
1326 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
1327 vtx_size++;
1328 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
1329 vtx_size++;
1330 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
1331 vtx_size += 3;
1332 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
1333 vtx_size++;
1334 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
1335 vtx_size++;
1336 if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
1337 vtx_size += 2;
1338 if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
1339 vtx_size += 2;
1340 if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
1341 vtx_size++;
1342 if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
1343 vtx_size += 2;
1344 if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
1345 vtx_size++;
1346 if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
1347 vtx_size += 2;
1348 if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
1349 vtx_size++;
1350 if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
1351 vtx_size++;
1352 /* blend weight */
1353 if (vtx_fmt & (0x7 << 15))
1354 vtx_size += (vtx_fmt >> 15) & 0x7;
1355 if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
1356 vtx_size += 3;
1357 if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
1358 vtx_size += 2;
1359 if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
1360 vtx_size++;
1361 if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
1362 vtx_size++;
1363 if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
1364 vtx_size++;
1365 if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
1366 vtx_size++;
1367 return vtx_size;
1368 }
1369
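/* Validate a single type-0 register write from userspace: apply relocations
 * to registers that take buffer offsets (color/depth/texture/ZPASS) and
 * record color buffer, depth buffer, texture and vertex-format state in the
 * CS tracker so later draw packets can be checked. */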
1370 static int r100_packet0_check(struct radeon_cs_parser *p,
1371 struct radeon_cs_packet *pkt,
1372 unsigned idx, unsigned reg)
1373 {
1374 struct radeon_cs_reloc *reloc;
1375 struct r100_cs_track *track;
1376 volatile uint32_t *ib;
1377 uint32_t tmp;
1378 int r;
1379 int i, face;
1380 u32 tile_flags = 0;
1381 u32 idx_value;
1382
1383 ib = p->ib->ptr;
1384 track = (struct r100_cs_track *)p->track;
1385
1386 idx_value = radeon_get_ib_value(p, idx);
1387
1388 switch (reg) {
1389 case RADEON_CRTC_GUI_TRIG_VLINE:
1390 r = r100_cs_packet_parse_vline(p);
1391 if (r) {
1392 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1393 idx, reg);
1394 r100_cs_dump_packet(p, pkt);
1395 return r;
1396 }
1397 break;
1398 /* FIXME: only allow PACKET3 blit? easier to check for out of
1399 * range access */
1400 case RADEON_DST_PITCH_OFFSET:
1401 case RADEON_SRC_PITCH_OFFSET:
1402 r = r100_reloc_pitch_offset(p, pkt, idx, reg);
1403 if (r)
1404 return r;
1405 break;
1406 case RADEON_RB3D_DEPTHOFFSET:
1407 r = r100_cs_packet_next_reloc(p, &reloc);
1408 if (r) {
1409 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1410 idx, reg);
1411 r100_cs_dump_packet(p, pkt);
1412 return r;
1413 }
1414 track->zb.robj = reloc->robj;
1415 track->zb.offset = idx_value;
1416 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1417 break;
1418 case RADEON_RB3D_COLOROFFSET:
1419 r = r100_cs_packet_next_reloc(p, &reloc);
1420 if (r) {
1421 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1422 idx, reg);
1423 r100_cs_dump_packet(p, pkt);
1424 return r;
1425 }
1426 track->cb[0].robj = reloc->robj;
1427 track->cb[0].offset = idx_value;
1428 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1429 break;
1430 case RADEON_PP_TXOFFSET_0:
1431 case RADEON_PP_TXOFFSET_1:
1432 case RADEON_PP_TXOFFSET_2:
1433 i = (reg - RADEON_PP_TXOFFSET_0) / 24;
1434 r = r100_cs_packet_next_reloc(p, &reloc);
1435 if (r) {
1436 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1437 idx, reg);
1438 r100_cs_dump_packet(p, pkt);
1439 return r;
1440 }
1441 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1442 track->textures[i].robj = reloc->robj;
1443 break;
1444 case RADEON_PP_CUBIC_OFFSET_T0_0:
1445 case RADEON_PP_CUBIC_OFFSET_T0_1:
1446 case RADEON_PP_CUBIC_OFFSET_T0_2:
1447 case RADEON_PP_CUBIC_OFFSET_T0_3:
1448 case RADEON_PP_CUBIC_OFFSET_T0_4:
1449 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
1450 r = r100_cs_packet_next_reloc(p, &reloc);
1451 if (r) {
1452 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1453 idx, reg);
1454 r100_cs_dump_packet(p, pkt);
1455 return r;
1456 }
1457 track->textures[0].cube_info[i].offset = idx_value;
1458 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1459 track->textures[0].cube_info[i].robj = reloc->robj;
1460 break;
1461 case RADEON_PP_CUBIC_OFFSET_T1_0:
1462 case RADEON_PP_CUBIC_OFFSET_T1_1:
1463 case RADEON_PP_CUBIC_OFFSET_T1_2:
1464 case RADEON_PP_CUBIC_OFFSET_T1_3:
1465 case RADEON_PP_CUBIC_OFFSET_T1_4:
1466 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
1467 r = r100_cs_packet_next_reloc(p, &reloc);
1468 if (r) {
1469 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1470 idx, reg);
1471 r100_cs_dump_packet(p, pkt);
1472 return r;
1473 }
1474 track->textures[1].cube_info[i].offset = idx_value;
1475 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1476 track->textures[1].cube_info[i].robj = reloc->robj;
1477 break;
1478 case RADEON_PP_CUBIC_OFFSET_T2_0:
1479 case RADEON_PP_CUBIC_OFFSET_T2_1:
1480 case RADEON_PP_CUBIC_OFFSET_T2_2:
1481 case RADEON_PP_CUBIC_OFFSET_T2_3:
1482 case RADEON_PP_CUBIC_OFFSET_T2_4:
1483 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
1484 r = r100_cs_packet_next_reloc(p, &reloc);
1485 if (r) {
1486 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1487 idx, reg);
1488 r100_cs_dump_packet(p, pkt);
1489 return r;
1490 }
1491 track->textures[2].cube_info[i].offset = idx_value;
1492 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1493 track->textures[2].cube_info[i].robj = reloc->robj;
1494 break;
1495 case RADEON_RE_WIDTH_HEIGHT:
1496 track->maxy = ((idx_value >> 16) & 0x7FF);
1497 break;
1498 case RADEON_RB3D_COLORPITCH:
1499 r = r100_cs_packet_next_reloc(p, &reloc);
1500 if (r) {
1501 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1502 idx, reg);
1503 r100_cs_dump_packet(p, pkt);
1504 return r;
1505 }
1506
1507 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1508 tile_flags |= RADEON_COLOR_TILE_ENABLE;
1509 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1510 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1511
1512 tmp = idx_value & ~(0x7 << 16);
1513 tmp |= tile_flags;
1514 ib[idx] = tmp;
1515
1516 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1517 break;
1518 case RADEON_RB3D_DEPTHPITCH:
1519 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1520 break;
1521 case RADEON_RB3D_CNTL:
1522 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1523 case 7:
1524 case 8:
1525 case 9:
1526 case 11:
1527 case 12:
1528 track->cb[0].cpp = 1;
1529 break;
1530 case 3:
1531 case 4:
1532 case 15:
1533 track->cb[0].cpp = 2;
1534 break;
1535 case 6:
1536 track->cb[0].cpp = 4;
1537 break;
1538 default:
1539 DRM_ERROR("Invalid color buffer format (%d) !\n",
1540 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1541 return -EINVAL;
1542 }
1543 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1544 break;
1545 case RADEON_RB3D_ZSTENCILCNTL:
1546 switch (idx_value & 0xf) {
1547 case 0:
1548 track->zb.cpp = 2;
1549 break;
1550 case 2:
1551 case 3:
1552 case 4:
1553 case 5:
1554 case 9:
1555 case 11:
1556 track->zb.cpp = 4;
1557 break;
1558 default:
1559 break;
1560 }
1561 break;
1562 case RADEON_RB3D_ZPASS_ADDR:
1563 r = r100_cs_packet_next_reloc(p, &reloc);
1564 if (r) {
1565 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1566 idx, reg);
1567 r100_cs_dump_packet(p, pkt);
1568 return r;
1569 }
1570 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1571 break;
1572 case RADEON_PP_CNTL:
1573 {
1574 uint32_t temp = idx_value >> 4;
1575 for (i = 0; i < track->num_texture; i++)
1576 track->textures[i].enabled = !!(temp & (1 << i));
1577 }
1578 break;
1579 case RADEON_SE_VF_CNTL:
1580 track->vap_vf_cntl = idx_value;
1581 break;
1582 case RADEON_SE_VTX_FMT:
1583 track->vtx_size = r100_get_vtx_size(idx_value);
1584 break;
1585 case RADEON_PP_TEX_SIZE_0:
1586 case RADEON_PP_TEX_SIZE_1:
1587 case RADEON_PP_TEX_SIZE_2:
1588 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1589 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1590 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1591 break;
1592 case RADEON_PP_TEX_PITCH_0:
1593 case RADEON_PP_TEX_PITCH_1:
1594 case RADEON_PP_TEX_PITCH_2:
1595 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1596 track->textures[i].pitch = idx_value + 32;
1597 break;
1598 case RADEON_PP_TXFILTER_0:
1599 case RADEON_PP_TXFILTER_1:
1600 case RADEON_PP_TXFILTER_2:
1601 i = (reg - RADEON_PP_TXFILTER_0) / 24;
1602 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
1603 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1604 tmp = (idx_value >> 23) & 0x7;
1605 if (tmp == 2 || tmp == 6)
1606 track->textures[i].roundup_w = false;
1607 tmp = (idx_value >> 27) & 0x7;
1608 if (tmp == 2 || tmp == 6)
1609 track->textures[i].roundup_h = false;
1610 break;
1611 case RADEON_PP_TXFORMAT_0:
1612 case RADEON_PP_TXFORMAT_1:
1613 case RADEON_PP_TXFORMAT_2:
1614 i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1615 if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
1616 track->textures[i].use_pitch = 1;
1617 } else {
1618 track->textures[i].use_pitch = 0;
1619 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1620 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1621 }
1622 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1623 track->textures[i].tex_coord_type = 2;
1624 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
1625 case RADEON_TXFORMAT_I8:
1626 case RADEON_TXFORMAT_RGB332:
1627 case RADEON_TXFORMAT_Y8:
1628 track->textures[i].cpp = 1;
1629 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1630 break;
1631 case RADEON_TXFORMAT_AI88:
1632 case RADEON_TXFORMAT_ARGB1555:
1633 case RADEON_TXFORMAT_RGB565:
1634 case RADEON_TXFORMAT_ARGB4444:
1635 case RADEON_TXFORMAT_VYUY422:
1636 case RADEON_TXFORMAT_YVYU422:
1637 case RADEON_TXFORMAT_SHADOW16:
1638 case RADEON_TXFORMAT_LDUDV655:
1639 case RADEON_TXFORMAT_DUDV88:
1640 track->textures[i].cpp = 2;
1641 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1642 break;
1643 case RADEON_TXFORMAT_ARGB8888:
1644 case RADEON_TXFORMAT_RGBA8888:
1645 case RADEON_TXFORMAT_SHADOW32:
1646 case RADEON_TXFORMAT_LDUDUV8888:
1647 track->textures[i].cpp = 4;
1648 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1649 break;
1650 case RADEON_TXFORMAT_DXT1:
1651 track->textures[i].cpp = 1;
1652 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
1653 break;
1654 case RADEON_TXFORMAT_DXT23:
1655 case RADEON_TXFORMAT_DXT45:
1656 track->textures[i].cpp = 1;
1657 track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
1658 break;
1659 }
1660 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1661 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1662 break;
1663 case RADEON_PP_CUBIC_FACES_0:
1664 case RADEON_PP_CUBIC_FACES_1:
1665 case RADEON_PP_CUBIC_FACES_2:
1666 tmp = idx_value;
1667 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1668 for (face = 0; face < 4; face++) {
1669 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1670 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1671 }
1672 break;
1673 default:
1674 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1675 reg, idx);
1676 return -EINVAL;
1677 }
1678 return 0;
1679 }
1680
1681 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1682 struct radeon_cs_packet *pkt,
1683 struct radeon_bo *robj)
1684 {
1685 unsigned idx;
1686 u32 value;
1687 idx = pkt->idx + 1;
1688 value = radeon_get_ib_value(p, idx + 2);
1689 if ((value + 1) > radeon_bo_size(robj)) {
1690 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1691 "(need %u have %lu) !\n",
1692 value + 1,
1693 radeon_bo_size(robj));
1694 return -EINVAL;
1695 }
1696 return 0;
1697 }
1698
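/* Validate a type-3 packet: patch buffer addresses for vertex/index buffer
 * packets, reject hyper-z clears from clients that do not own hyper-z, and
 * run the CS tracker check before every draw command. */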
1699 static int r100_packet3_check(struct radeon_cs_parser *p,
1700 struct radeon_cs_packet *pkt)
1701 {
1702 struct radeon_cs_reloc *reloc;
1703 struct r100_cs_track *track;
1704 unsigned idx;
1705 volatile uint32_t *ib;
1706 int r;
1707
1708 ib = p->ib->ptr;
1709 idx = pkt->idx + 1;
1710 track = (struct r100_cs_track *)p->track;
1711 switch (pkt->opcode) {
1712 case PACKET3_3D_LOAD_VBPNTR:
1713 r = r100_packet3_load_vbpntr(p, pkt, idx);
1714 if (r)
1715 return r;
1716 break;
1717 case PACKET3_INDX_BUFFER:
1718 r = r100_cs_packet_next_reloc(p, &reloc);
1719 if (r) {
1720 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1721 r100_cs_dump_packet(p, pkt);
1722 return r;
1723 }
1724 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
1725 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1726 if (r) {
1727 return r;
1728 }
1729 break;
1730 case 0x23:
1731 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
1732 r = r100_cs_packet_next_reloc(p, &reloc);
1733 if (r) {
1734 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1735 r100_cs_dump_packet(p, pkt);
1736 return r;
1737 }
1738 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
1739 track->num_arrays = 1;
1740 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1741
1742 track->arrays[0].robj = reloc->robj;
1743 track->arrays[0].esize = track->vtx_size;
1744
1745 track->max_indx = radeon_get_ib_value(p, idx+1);
1746
1747 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1748 track->immd_dwords = pkt->count - 1;
1749 r = r100_cs_track_check(p->rdev, track);
1750 if (r)
1751 return r;
1752 break;
1753 case PACKET3_3D_DRAW_IMMD:
1754 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1755 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1756 return -EINVAL;
1757 }
1758 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1759 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1760 track->immd_dwords = pkt->count - 1;
1761 r = r100_cs_track_check(p->rdev, track);
1762 if (r)
1763 return r;
1764 break;
1765 /* triggers drawing using in-packet vertex data */
1766 case PACKET3_3D_DRAW_IMMD_2:
1767 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1768 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1769 return -EINVAL;
1770 }
1771 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1772 track->immd_dwords = pkt->count;
1773 r = r100_cs_track_check(p->rdev, track);
1774 if (r)
1775 return r;
1776 break;
1777 /* triggers drawing using in-packet vertex data */
1778 case PACKET3_3D_DRAW_VBUF_2:
1779 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1780 r = r100_cs_track_check(p->rdev, track);
1781 if (r)
1782 return r;
1783 break;
1784 /* triggers drawing of vertex buffers setup elsewhere */
1785 case PACKET3_3D_DRAW_INDX_2:
1786 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1787 r = r100_cs_track_check(p->rdev, track);
1788 if (r)
1789 return r;
1790 break;
1791 /* triggers drawing using indices to vertex buffer */
1792 case PACKET3_3D_DRAW_VBUF:
1793 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1794 r = r100_cs_track_check(p->rdev, track);
1795 if (r)
1796 return r;
1797 break;
1798 /* triggers drawing of vertex buffers setup elsewhere */
1799 case PACKET3_3D_DRAW_INDX:
1800 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1801 r = r100_cs_track_check(p->rdev, track);
1802 if (r)
1803 return r;
1804 break;
1805 /* triggers drawing using indices to vertex buffer */
1806 case PACKET3_3D_CLEAR_HIZ:
1807 case PACKET3_3D_CLEAR_ZMASK:
1808 if (p->rdev->hyperz_filp != p->filp)
1809 return -EINVAL;
1810 break;
1811 case PACKET3_NOP:
1812 break;
1813 default:
1814 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1815 return -EINVAL;
1816 }
1817 return 0;
1818 }
1819
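/* Top-level command-stream parser: allocate a fresh tracking structure, then
 * walk the IB packet by packet, dispatching type-0 packets to the r100 or
 * r200 register checker (using the chip's safe-register bitmap) and type-3
 * packets to r100_packet3_check() until the end of the IB chunk. */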
1820 int r100_cs_parse(struct radeon_cs_parser *p)
1821 {
1822 struct radeon_cs_packet pkt;
1823 struct r100_cs_track *track;
1824 int r;
1825
1826 track = kzalloc(sizeof(*track), GFP_KERNEL);
if (track == NULL)
return -ENOMEM;
1827 r100_cs_track_clear(p->rdev, track);
1828 p->track = track;
1829 do {
1830 r = r100_cs_packet_parse(p, &pkt, p->idx);
1831 if (r) {
1832 return r;
1833 }
1834 p->idx += pkt.count + 2;
1835 switch (pkt.type) {
1836 case PACKET_TYPE0:
1837 if (p->rdev->family >= CHIP_R200)
1838 r = r100_cs_parse_packet0(p, &pkt,
1839 p->rdev->config.r100.reg_safe_bm,
1840 p->rdev->config.r100.reg_safe_bm_size,
1841 &r200_packet0_check);
1842 else
1843 r = r100_cs_parse_packet0(p, &pkt,
1844 p->rdev->config.r100.reg_safe_bm,
1845 p->rdev->config.r100.reg_safe_bm_size,
1846 &r100_packet0_check);
1847 break;
1848 case PACKET_TYPE2:
1849 break;
1850 case PACKET_TYPE3:
1851 r = r100_packet3_check(p, &pkt);
1852 break;
1853 default:
1854 DRM_ERROR("Unknown packet type %d !\n",
1855 pkt.type);
1856 return -EINVAL;
1857 }
1858 if (r) {
1859 return r;
1860 }
1861 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1862 return 0;
1863 }
1864
1865
1866 /*
1867 * Global GPU functions
1868 */
1869 void r100_errata(struct radeon_device *rdev)
1870 {
1871 rdev->pll_errata = 0;
1872
1873 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
1874 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
1875 }
1876
1877 if (rdev->family == CHIP_RV100 ||
1878 rdev->family == CHIP_RS100 ||
1879 rdev->family == CHIP_RS200) {
1880 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
1881 }
1882 }
1883
1884 /* Wait for vertical sync on primary CRTC */
1885 void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
1886 {
1887 uint32_t crtc_gen_cntl, tmp;
1888 int i;
1889
1890 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
1891 if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
1892 !(crtc_gen_cntl & RADEON_CRTC_EN)) {
1893 return;
1894 }
1895 /* Clear the CRTC_VBLANK_SAVE bit */
1896 WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
1897 for (i = 0; i < rdev->usec_timeout; i++) {
1898 tmp = RREG32(RADEON_CRTC_STATUS);
1899 if (tmp & RADEON_CRTC_VBLANK_SAVE) {
1900 return;
1901 }
1902 DRM_UDELAY(1);
1903 }
1904 }
1905
1906 /* Wait for vertical sync on secondary CRTC */
1907 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
1908 {
1909 uint32_t crtc2_gen_cntl, tmp;
1910 int i;
1911
1912 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1913 if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
1914 !(crtc2_gen_cntl & RADEON_CRTC2_EN))
1915 return;
1916
1917 /* Clear the CRTC_VBLANK_SAVE bit */
1918 WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
1919 for (i = 0; i < rdev->usec_timeout; i++) {
1920 tmp = RREG32(RADEON_CRTC2_STATUS);
1921 if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
1922 return;
1923 }
1924 DRM_UDELAY(1);
1925 }
1926 }
1927
1928 int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
1929 {
1930 unsigned i;
1931 uint32_t tmp;
1932
1933 for (i = 0; i < rdev->usec_timeout; i++) {
1934 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
1935 if (tmp >= n) {
1936 return 0;
1937 }
1938 DRM_UDELAY(1);
1939 }
1940 return -1;
1941 }
1942
1943 int r100_gui_wait_for_idle(struct radeon_device *rdev)
1944 {
1945 unsigned i;
1946 uint32_t tmp;
1947
1948 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
1949 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
1950 " Bad things might happen.\n");
1951 }
1952 for (i = 0; i < rdev->usec_timeout; i++) {
1953 tmp = RREG32(RADEON_RBBM_STATUS);
1954 if (!(tmp & RADEON_RBBM_ACTIVE)) {
1955 return 0;
1956 }
1957 DRM_UDELAY(1);
1958 }
1959 return -1;
1960 }
1961
1962 int r100_mc_wait_for_idle(struct radeon_device *rdev)
1963 {
1964 unsigned i;
1965 uint32_t tmp;
1966
1967 for (i = 0; i < rdev->usec_timeout; i++) {
1968 /* read MC_STATUS */
1969 tmp = RREG32(RADEON_MC_STATUS);
1970 if (tmp & RADEON_MC_IDLE) {
1971 return 0;
1972 }
1973 DRM_UDELAY(1);
1974 }
1975 return -1;
1976 }
1977
1978 void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
1979 {
1980 lockup->last_cp_rptr = cp->rptr;
1981 lockup->last_jiffies = jiffies;
1982 }
1983
1984 /**
1985 * r100_gpu_cp_is_lockup() - check whether the CP is locked up using recorded information
1986 * @rdev: radeon device structure
1987 * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
1988 * @cp: radeon_cp structure holding CP information
1989 *
1990 * We don't need to initialize the lockup tracking information: either the CP
1991 * rptr will have moved to a different value, or jiffies will have wrapped
1992 * around, either of which forces (re)initialization of the tracking information.
1993 *
1994 * A possible false positive is being called again after a long while with
1995 * last_cp_rptr == the current CP rptr; unlikely, but it can happen. To avoid
1996 * this, if the elapsed time since the last call is 10 seconds or more we
1997 * return false and update the tracking information. Because of this the caller
1998 * must call r100_gpu_cp_is_lockup several times within 10 seconds for a lockup
1999 * to be reported; the fencing code should be cautious about that.
2000 *
2001 * The caller should write to the ring to force the CP to do something, so we
2002 * don't get a false positive when the CP simply has nothing to do.
2003 *
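* r100_gpu_is_lockup() below is the in-tree caller and shows the expected
* pattern: force a little CP work (a couple of PACKET2 NOPs), refresh
* cp->rptr from RADEON_CP_RB_RPTR, then call this function with the
* per-ASIC lockup tracking structure (rdev->config.r100.lockup here).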
2004 **/
2005 bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
2006 {
2007 unsigned long cjiffies, elapsed;
2008
2009 cjiffies = jiffies;
2010 if (!time_after(cjiffies, lockup->last_jiffies)) {
2011 /* likely a wrap around */
2012 lockup->last_cp_rptr = cp->rptr;
2013 lockup->last_jiffies = jiffies;
2014 return false;
2015 }
2016 if (cp->rptr != lockup->last_cp_rptr) {
2017 /* CP is still working no lockup */
2018 lockup->last_cp_rptr = cp->rptr;
2019 lockup->last_jiffies = jiffies;
2020 return false;
2021 }
2022 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2023 if (elapsed >= 10000) {
2024 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2025 return true;
2026 }
2027 /* give a chance to the GPU ... */
2028 return false;
2029 }
2030
2031 bool r100_gpu_is_lockup(struct radeon_device *rdev)
2032 {
2033 u32 rbbm_status;
2034 int r;
2035
2036 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2037 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2038 r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
2039 return false;
2040 }
2041 /* force CP activities */
2042 r = radeon_ring_lock(rdev, 2);
2043 if (!r) {
2044 /* PACKET2 NOP */
2045 radeon_ring_write(rdev, 0x80000000);
2046 radeon_ring_write(rdev, 0x80000000);
2047 radeon_ring_unlock_commit(rdev);
2048 }
2049 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
2050 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
2051 }
2052
2053 void r100_bm_disable(struct radeon_device *rdev)
2054 {
2055 u32 tmp;
2056
2057 /* disable bus mastering */
2058 tmp = RREG32(R_000030_BUS_CNTL);
2059 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2060 mdelay(1);
2061 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2062 mdelay(1);
2063 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2064 tmp = RREG32(RADEON_BUS_CNTL);
2065 mdelay(1);
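/* Offset 0x4 in PCI config space is the standard PCI_COMMAND register;
 * masking with 0xFFFB clears bit 2 (Bus Master Enable), which stops the
 * device from initiating DMA on the bus. */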
2066 pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
2067 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
2068 mdelay(1);
2069 }
2070
2071 int r100_asic_reset(struct radeon_device *rdev)
2072 {
2073 struct r100_mc_save save;
2074 u32 status, tmp;
2075
2076 r100_mc_stop(rdev, &save);
2077 status = RREG32(R_000E40_RBBM_STATUS);
2078 if (!G_000E40_GUI_ACTIVE(status)) {
2079 return 0;
2080 }
2081 status = RREG32(R_000E40_RBBM_STATUS);
2082 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2083 /* stop CP */
2084 WREG32(RADEON_CP_CSQ_CNTL, 0);
2085 tmp = RREG32(RADEON_CP_RB_CNTL);
2086 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2087 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2088 WREG32(RADEON_CP_RB_WPTR, 0);
2089 WREG32(RADEON_CP_RB_CNTL, tmp);
2090 /* save PCI state */
2091 pci_save_state(rdev->pdev);
2092 /* disable bus mastering */
2093 r100_bm_disable(rdev);
2094 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2095 S_0000F0_SOFT_RESET_RE(1) |
2096 S_0000F0_SOFT_RESET_PP(1) |
2097 S_0000F0_SOFT_RESET_RB(1));
2098 RREG32(R_0000F0_RBBM_SOFT_RESET);
2099 mdelay(500);
2100 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2101 mdelay(1);
2102 status = RREG32(R_000E40_RBBM_STATUS);
2103 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2104 /* reset CP */
2105 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2106 RREG32(R_0000F0_RBBM_SOFT_RESET);
2107 mdelay(500);
2108 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2109 mdelay(1);
2110 status = RREG32(R_000E40_RBBM_STATUS);
2111 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2112 /* restore PCI & busmastering */
2113 pci_restore_state(rdev->pdev);
2114 r100_enable_bm(rdev);
2115 /* Check if GPU is idle */
2116 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2117 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2118 dev_err(rdev->dev, "failed to reset GPU\n");
2119 rdev->gpu_lockup = true;
2120 return -1;
2121 }
2122 r100_mc_resume(rdev, &save);
2123 dev_info(rdev->dev, "GPU reset succeeded\n");
2124 return 0;
2125 }
2126
2127 void r100_set_common_regs(struct radeon_device *rdev)
2128 {
2129 struct drm_device *dev = rdev->ddev;
2130 bool force_dac2 = false;
2131 u32 tmp;
2132
2133 /* set these so they don't interfere with anything */
2134 WREG32(RADEON_OV0_SCALE_CNTL, 0);
2135 WREG32(RADEON_SUBPIC_CNTL, 0);
2136 WREG32(RADEON_VIPH_CONTROL, 0);
2137 WREG32(RADEON_I2C_CNTL_1, 0);
2138 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2139 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2140 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2141
2142 /* always set up dac2 on rn50 and some rv100 as lots
2143 * of servers seem to wire it up to a VGA port but
2144 * don't report it in the bios connector
2145 * table.
2146 */
2147 switch (dev->pdev->device) {
2148 /* RN50 */
2149 case 0x515e:
2150 case 0x5969:
2151 force_dac2 = true;
2152 break;
2153 /* RV100*/
2154 case 0x5159:
2155 case 0x515a:
2156 /* DELL triple head servers */
2157 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
2158 ((dev->pdev->subsystem_device == 0x016c) ||
2159 (dev->pdev->subsystem_device == 0x016d) ||
2160 (dev->pdev->subsystem_device == 0x016e) ||
2161 (dev->pdev->subsystem_device == 0x016f) ||
2162 (dev->pdev->subsystem_device == 0x0170) ||
2163 (dev->pdev->subsystem_device == 0x017d) ||
2164 (dev->pdev->subsystem_device == 0x017e) ||
2165 (dev->pdev->subsystem_device == 0x0183) ||
2166 (dev->pdev->subsystem_device == 0x018a) ||
2167 (dev->pdev->subsystem_device == 0x019a)))
2168 force_dac2 = true;
2169 break;
2170 }
2171
2172 if (force_dac2) {
2173 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2174 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2175 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2176
2177 /* For CRT on DAC2, don't turn it on if BIOS didn't
2178 enable it, even if it's detected.
2179 */
2180
2181 /* force it to crtc0 */
2182 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2183 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2184 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2185
2186 /* set up the TV DAC */
2187 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2188 RADEON_TV_DAC_STD_MASK |
2189 RADEON_TV_DAC_RDACPD |
2190 RADEON_TV_DAC_GDACPD |
2191 RADEON_TV_DAC_BDACPD |
2192 RADEON_TV_DAC_BGADJ_MASK |
2193 RADEON_TV_DAC_DACADJ_MASK);
2194 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2195 RADEON_TV_DAC_NHOLD |
2196 RADEON_TV_DAC_STD_PS2 |
2197 (0x58 << 16));
2198
2199 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2200 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2201 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2202 }
2203
2204 /* switch PM block to ACPI mode */
2205 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2206 tmp &= ~RADEON_PM_MODE_SEL;
2207 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2208
2209 }
2210
2211 /*
2212 * VRAM info
2213 */
2214 static void r100_vram_get_type(struct radeon_device *rdev)
2215 {
2216 uint32_t tmp;
2217
2218 rdev->mc.vram_is_ddr = false;
2219 if (rdev->flags & RADEON_IS_IGP)
2220 rdev->mc.vram_is_ddr = true;
2221 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2222 rdev->mc.vram_is_ddr = true;
2223 if ((rdev->family == CHIP_RV100) ||
2224 (rdev->family == CHIP_RS100) ||
2225 (rdev->family == CHIP_RS200)) {
2226 tmp = RREG32(RADEON_MEM_CNTL);
2227 if (tmp & RV100_HALF_MODE) {
2228 rdev->mc.vram_width = 32;
2229 } else {
2230 rdev->mc.vram_width = 64;
2231 }
2232 if (rdev->flags & RADEON_SINGLE_CRTC) {
2233 rdev->mc.vram_width /= 4;
2234 rdev->mc.vram_is_ddr = true;
2235 }
2236 } else if (rdev->family <= CHIP_RV280) {
2237 tmp = RREG32(RADEON_MEM_CNTL);
2238 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2239 rdev->mc.vram_width = 128;
2240 } else {
2241 rdev->mc.vram_width = 64;
2242 }
2243 } else {
2244 /* newer IGPs */
2245 rdev->mc.vram_width = 128;
2246 }
2247 }
2248
2249 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2250 {
2251 u32 aper_size;
2252 u8 byte;
2253
2254 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2255
2256 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
2257 * that is, have the 2nd generation multifunction PCI interface
2258 */
2259 if (rdev->family == CHIP_RV280 ||
2260 rdev->family >= CHIP_RV350) {
2261 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2262 ~RADEON_HDP_APER_CNTL);
2263 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2264 return aper_size * 2;
2265 }
2266
2267 /* Older cards have all sorts of funny issues to deal with. First
2268 * check if it's a multifunction card by reading the PCI config
2269 * header type... Limit those to one aperture size
2270 */
2271 pci_read_config_byte(rdev->pdev, 0xe, &byte);
2272 if (byte & 0x80) {
2273 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2274 DRM_INFO("Limiting VRAM to one aperture\n");
2275 return aper_size;
2276 }
2277
2278 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2279 * has set it up. We don't write this as it's broken on some ASICs but
2280 * we expect the BIOS to have done the right thing (might be too optimistic...)
2281 */
2282 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2283 return aper_size * 2;
2284 return aper_size;
2285 }
2286
2287 void r100_vram_init_sizes(struct radeon_device *rdev)
2288 {
2289 u64 config_aper_size;
2290
2291 /* work out accessible VRAM */
2292 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2293 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2294 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2295 /* FIXME we don't use the second aperture yet when we could use it */
2296 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2297 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2298 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2299 if (rdev->flags & RADEON_IS_IGP) {
2300 uint32_t tom;
2301 /* read NB_TOM to get the amount of ram stolen for the GPU */
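/* The register layout is an assumption (not spelled out in this file): the
 * high 16 bits appear to hold the top and the low 16 bits the base of the
 * stolen-memory range, in 64 KiB units, so the computation below is
 * (top - base + 1) * 64 KiB. Illustrative example: a NB_TOM value of
 * 0x3FFF3C00 gives (0x3FFF - 0x3C00 + 1) << 16, i.e. 64 MiB of stolen VRAM. */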
2302 tom = RREG32(RADEON_NB_TOM);
2303 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2304 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2305 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2306 } else {
2307 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2308 /* Some production boards of m6 will report 0
2309 * if it's 8 MB
2310 */
2311 if (rdev->mc.real_vram_size == 0) {
2312 rdev->mc.real_vram_size = 8192 * 1024;
2313 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2314 }
2315 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2316 * Novell bug 204882, along with lots of Ubuntu ones
2317 */
2318 if (config_aper_size > rdev->mc.real_vram_size)
2319 rdev->mc.mc_vram_size = config_aper_size;
2320 else
2321 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2322 }
2323 }
2324
2325 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2326 {
2327 uint32_t temp;
2328
2329 temp = RREG32(RADEON_CONFIG_CNTL);
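/* The bit meanings are an assumption, not taken from this file: bit 8 appears
 * to enable VGA memory decode and bit 9 to disable VGA I/O decode, so the
 * "state == false" path turns VGA RAM decode off and blocks VGA I/O. */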
2330 if (state == false) {
2331 temp &= ~(1<<8);
2332 temp |= (1<<9);
2333 } else {
2334 temp &= ~(1<<9);
2335 }
2336 WREG32(RADEON_CONFIG_CNTL, temp);
2337 }
2338
2339 void r100_mc_init(struct radeon_device *rdev)
2340 {
2341 u64 base;
2342
2343 r100_vram_get_type(rdev);
2344 r100_vram_init_sizes(rdev);
2345 base = rdev->mc.aper_base;
2346 if (rdev->flags & RADEON_IS_IGP)
2347 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2348 radeon_vram_location(rdev, &rdev->mc, base);
2349 rdev->mc.gtt_base_align = 0;
2350 if (!(rdev->flags & RADEON_IS_AGP))
2351 radeon_gtt_location(rdev, &rdev->mc);
2352 radeon_update_bandwidth_info(rdev);
2353 }
2354
2355
2356 /*
2357 * Indirect registers accessor
2358 */
2359 void r100_pll_errata_after_index(struct radeon_device *rdev)
2360 {
2361 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2362 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2363 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2364 }
2365 }
2366
2367 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2368 {
2369 /* This workaround is necessary on RV100, RS100 and RS200 chips,
2370 * or the chip could hang on a subsequent access
2371 */
2372 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2373 udelay(5000);
2374 }
2375
2376 /* This function is required to work around a hardware bug in some (all?)
2377 * revisions of the R300. This workaround should be called after every
2378 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2379 * may not be correct.
2380 */
2381 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2382 uint32_t save, tmp;
2383
2384 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2385 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2386 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2387 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2388 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
2389 }
2390 }
2391
2392 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2393 {
2394 uint32_t data;
2395
2396 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2397 r100_pll_errata_after_index(rdev);
2398 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2399 r100_pll_errata_after_data(rdev);
2400 return data;
2401 }
2402
2403 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2404 {
2405 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2406 r100_pll_errata_after_index(rdev);
2407 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2408 r100_pll_errata_after_data(rdev);
2409 }
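/* These accessors back the RREG32_PLL()/WREG32_PLL() helpers defined in
 * radeon.h. A minimal, illustrative use (the register choice is just an
 * example, not taken from this file):
 *
 *   ref_div = RREG32_PLL(RADEON_PPLL_REF_DIV);
 *
 * The index is latched through CLOCK_CNTL_INDEX (with RADEON_PLL_WR_EN set
 * for writes) and the payload moves through CLOCK_CNTL_DATA, with the PLL
 * errata hooks applied around each access. */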
2410
2411 void r100_set_safe_registers(struct radeon_device *rdev)
2412 {
2413 if (ASIC_IS_RN50(rdev)) {
2414 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2415 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2416 } else if (rdev->family < CHIP_R200) {
2417 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2418 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2419 } else {
2420 r200_set_safe_registers(rdev);
2421 }
2422 }
2423
2424 /*
2425 * Debugfs info
2426 */
2427 #if defined(CONFIG_DEBUG_FS)
2428 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2429 {
2430 struct drm_info_node *node = (struct drm_info_node *) m->private;
2431 struct drm_device *dev = node->minor->dev;
2432 struct radeon_device *rdev = dev->dev_private;
2433 uint32_t reg, value;
2434 unsigned i;
2435
2436 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2437 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2438 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2439 for (i = 0; i < 64; i++) {
2440 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2441 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2442 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2443 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2444 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2445 }
2446 return 0;
2447 }
2448
2449 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2450 {
2451 struct drm_info_node *node = (struct drm_info_node *) m->private;
2452 struct drm_device *dev = node->minor->dev;
2453 struct radeon_device *rdev = dev->dev_private;
2454 uint32_t rdp, wdp;
2455 unsigned count, i, j;
2456
2457 radeon_ring_free_size(rdev);
2458 rdp = RREG32(RADEON_CP_RB_RPTR);
2459 wdp = RREG32(RADEON_CP_RB_WPTR);
2460 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
2461 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2462 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2463 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2464 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2465 seq_printf(m, "%u dwords in ring\n", count);
2466 for (j = 0; j <= count; j++) {
2467 i = (rdp + j) & rdev->cp.ptr_mask;
2468 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2469 }
2470 return 0;
2471 }
2472
2473
2474 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2475 {
2476 struct drm_info_node *node = (struct drm_info_node *) m->private;
2477 struct drm_device *dev = node->minor->dev;
2478 struct radeon_device *rdev = dev->dev_private;
2479 uint32_t csq_stat, csq2_stat, tmp;
2480 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2481 unsigned i;
2482
2483 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2484 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2485 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2486 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2487 r_rptr = (csq_stat >> 0) & 0x3ff;
2488 r_wptr = (csq_stat >> 10) & 0x3ff;
2489 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2490 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2491 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2492 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2493 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2494 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2495 seq_printf(m, "Ring rptr %u\n", r_rptr);
2496 seq_printf(m, "Ring wptr %u\n", r_wptr);
2497 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2498 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2499 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2500 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2501 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
2502 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
2503 seq_printf(m, "Ring fifo:\n");
2504 for (i = 0; i < 256; i++) {
2505 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2506 tmp = RREG32(RADEON_CP_CSQ_DATA);
2507 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2508 }
2509 seq_printf(m, "Indirect1 fifo:\n");
2510 for (i = 256; i <= 512; i++) {
2511 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2512 tmp = RREG32(RADEON_CP_CSQ_DATA);
2513 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2514 }
2515 seq_printf(m, "Indirect2 fifo:\n");
2516 for (i = 640; i < ib1_wptr; i++) {
2517 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2518 tmp = RREG32(RADEON_CP_CSQ_DATA);
2519 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2520 }
2521 return 0;
2522 }
2523
2524 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2525 {
2526 struct drm_info_node *node = (struct drm_info_node *) m->private;
2527 struct drm_device *dev = node->minor->dev;
2528 struct radeon_device *rdev = dev->dev_private;
2529 uint32_t tmp;
2530
2531 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
2532 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
2533 tmp = RREG32(RADEON_MC_FB_LOCATION);
2534 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
2535 tmp = RREG32(RADEON_BUS_CNTL);
2536 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
2537 tmp = RREG32(RADEON_MC_AGP_LOCATION);
2538 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
2539 tmp = RREG32(RADEON_AGP_BASE);
2540 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
2541 tmp = RREG32(RADEON_HOST_PATH_CNTL);
2542 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
2543 tmp = RREG32(0x01D0);
2544 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
2545 tmp = RREG32(RADEON_AIC_LO_ADDR);
2546 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
2547 tmp = RREG32(RADEON_AIC_HI_ADDR);
2548 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
2549 tmp = RREG32(0x01E4);
2550 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
2551 return 0;
2552 }
2553
2554 static struct drm_info_list r100_debugfs_rbbm_list[] = {
2555 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
2556 };
2557
2558 static struct drm_info_list r100_debugfs_cp_list[] = {
2559 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
2560 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
2561 };
2562
2563 static struct drm_info_list r100_debugfs_mc_info_list[] = {
2564 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
2565 };
2566 #endif
2567
2568 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
2569 {
2570 #if defined(CONFIG_DEBUG_FS)
2571 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
2572 #else
2573 return 0;
2574 #endif
2575 }
2576
2577 int r100_debugfs_cp_init(struct radeon_device *rdev)
2578 {
2579 #if defined(CONFIG_DEBUG_FS)
2580 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
2581 #else
2582 return 0;
2583 #endif
2584 }
2585
2586 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
2587 {
2588 #if defined(CONFIG_DEBUG_FS)
2589 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
2590 #else
2591 return 0;
2592 #endif
2593 }
2594
2595 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2596 uint32_t tiling_flags, uint32_t pitch,
2597 uint32_t offset, uint32_t obj_size)
2598 {
2599 int surf_index = reg * 16;
2600 int flags = 0;
2601
2602 if (rdev->family <= CHIP_RS200) {
2603 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2604 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2605 flags |= RADEON_SURF_TILE_COLOR_BOTH;
2606 if (tiling_flags & RADEON_TILING_MACRO)
2607 flags |= RADEON_SURF_TILE_COLOR_MACRO;
2608 } else if (rdev->family <= CHIP_RV280) {
2609 if (tiling_flags & (RADEON_TILING_MACRO))
2610 flags |= R200_SURF_TILE_COLOR_MACRO;
2611 if (tiling_flags & RADEON_TILING_MICRO)
2612 flags |= R200_SURF_TILE_COLOR_MICRO;
2613 } else {
2614 if (tiling_flags & RADEON_TILING_MACRO)
2615 flags |= R300_SURF_TILE_MACRO;
2616 if (tiling_flags & RADEON_TILING_MICRO)
2617 flags |= R300_SURF_TILE_MICRO;
2618 }
2619
2620 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
2621 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
2622 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
2623 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
2624
2625 /* when we aren't tiling, the pitch seems to need to be further divided down. - tested on power5 + rn50 server */
2626 if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
2627 if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
2628 if (ASIC_IS_RN50(rdev))
2629 pitch /= 16;
2630 }
2631
2632 /* r100/r200 divide by 16 */
2633 if (rdev->family < CHIP_R300)
2634 flags |= pitch / 16;
2635 else
2636 flags |= pitch / 8;
2637
2638
2639 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2640 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2641 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
2642 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
2643 return 0;
2644 }
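/* A worked example following the code above (values are illustrative only):
 * on an r200-class part (family <= RV280) with RADEON_TILING_MACRO set and
 * pitch == 4096, flags ends up as
 *   R200_SURF_TILE_COLOR_MACRO | (4096 / 16)
 * before being written to RADEON_SURFACE0_INFO + reg * 16. */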
2645
2646 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
2647 {
2648 int surf_index = reg * 16;
2649 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
2650 }
2651
2652 void r100_bandwidth_update(struct radeon_device *rdev)
2653 {
2654 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
2655 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
2656 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2657 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2658 fixed20_12 memtcas_ff[8] = {
2659 dfixed_init(1),
2660 dfixed_init(2),
2661 dfixed_init(3),
2662 dfixed_init(0),
2663 dfixed_init_half(1),
2664 dfixed_init_half(2),
2665 dfixed_init(0),
2666 };
2667 fixed20_12 memtcas_rs480_ff[8] = {
2668 dfixed_init(0),
2669 dfixed_init(1),
2670 dfixed_init(2),
2671 dfixed_init(3),
2672 dfixed_init(0),
2673 dfixed_init_half(1),
2674 dfixed_init_half(2),
2675 dfixed_init_half(3),
2676 };
2677 fixed20_12 memtcas2_ff[8] = {
2678 dfixed_init(0),
2679 dfixed_init(1),
2680 dfixed_init(2),
2681 dfixed_init(3),
2682 dfixed_init(4),
2683 dfixed_init(5),
2684 dfixed_init(6),
2685 dfixed_init(7),
2686 };
2687 fixed20_12 memtrbs[8] = {
2688 dfixed_init(1),
2689 dfixed_init_half(1),
2690 dfixed_init(2),
2691 dfixed_init_half(2),
2692 dfixed_init(3),
2693 dfixed_init_half(3),
2694 dfixed_init(4),
2695 dfixed_init_half(4)
2696 };
2697 fixed20_12 memtrbs_r4xx[8] = {
2698 dfixed_init(4),
2699 dfixed_init(5),
2700 dfixed_init(6),
2701 dfixed_init(7),
2702 dfixed_init(8),
2703 dfixed_init(9),
2704 dfixed_init(10),
2705 dfixed_init(11)
2706 };
2707 fixed20_12 min_mem_eff;
2708 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2709 fixed20_12 cur_latency_mclk, cur_latency_sclk;
2710 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
2711 disp_drain_rate2, read_return_rate;
2712 fixed20_12 time_disp1_drop_priority;
2713 int c;
2714 int cur_size = 16; /* in octawords */
2715 int critical_point = 0, critical_point2;
2716 /* uint32_t read_return_rate, time_disp1_drop_priority; */
2717 int stop_req, max_stop_req;
2718 struct drm_display_mode *mode1 = NULL;
2719 struct drm_display_mode *mode2 = NULL;
2720 uint32_t pixel_bytes1 = 0;
2721 uint32_t pixel_bytes2 = 0;
2722
2723 radeon_update_display_priority(rdev);
2724
2725 if (rdev->mode_info.crtcs[0]->base.enabled) {
2726 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2727 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2728 }
2729 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
2730 if (rdev->mode_info.crtcs[1]->base.enabled) {
2731 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2732 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2733 }
2734 }
2735
2736 min_mem_eff.full = dfixed_const_8(0);
2737 /* get modes */
2738 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2739 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2740 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
2741 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
2742 /* check crtc enables */
2743 if (mode2)
2744 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
2745 if (mode1)
2746 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
2747 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
2748 }
2749
2750 /*
2751 * determine if there is enough bandwidth for the current mode
2752 */
2753 sclk_ff = rdev->pm.sclk;
2754 mclk_ff = rdev->pm.mclk;
2755
2756 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2757 temp_ff.full = dfixed_const(temp);
2758 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
2759
2760 pix_clk.full = 0;
2761 pix_clk2.full = 0;
2762 peak_disp_bw.full = 0;
2763 if (mode1) {
2764 temp_ff.full = dfixed_const(1000);
2765 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
2766 pix_clk.full = dfixed_div(pix_clk, temp_ff);
2767 temp_ff.full = dfixed_const(pixel_bytes1);
2768 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
2769 }
2770 if (mode2) {
2771 temp_ff.full = dfixed_const(1000);
2772 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
2773 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
2774 temp_ff.full = dfixed_const(pixel_bytes2);
2775 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
2776 }
2777
2778 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
2779 if (peak_disp_bw.full >= mem_bw.full) {
2780 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
2781 "If you have a flickering problem, try lowering the resolution, refresh rate, or color depth\n");
2782 }
2783
2784 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
2785 temp = RREG32(RADEON_MEM_TIMING_CNTL);
2786 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
2787 mem_trcd = ((temp >> 2) & 0x3) + 1;
2788 mem_trp = ((temp & 0x3)) + 1;
2789 mem_tras = ((temp & 0x70) >> 4) + 1;
2790 } else if (rdev->family == CHIP_R300 ||
2791 rdev->family == CHIP_R350) { /* r300, r350 */
2792 mem_trcd = (temp & 0x7) + 1;
2793 mem_trp = ((temp >> 8) & 0x7) + 1;
2794 mem_tras = ((temp >> 11) & 0xf) + 4;
2795 } else if (rdev->family == CHIP_RV350 ||
2796 rdev->family <= CHIP_RV380) {
2797 /* rv3x0 */
2798 mem_trcd = (temp & 0x7) + 3;
2799 mem_trp = ((temp >> 8) & 0x7) + 3;
2800 mem_tras = ((temp >> 11) & 0xf) + 6;
2801 } else if (rdev->family == CHIP_R420 ||
2802 rdev->family == CHIP_R423 ||
2803 rdev->family == CHIP_RV410) {
2804 /* r4xx */
2805 mem_trcd = (temp & 0xf) + 3;
2806 if (mem_trcd > 15)
2807 mem_trcd = 15;
2808 mem_trp = ((temp >> 8) & 0xf) + 3;
2809 if (mem_trp > 15)
2810 mem_trp = 15;
2811 mem_tras = ((temp >> 12) & 0x1f) + 6;
2812 if (mem_tras > 31)
2813 mem_tras = 31;
2814 } else { /* RV200, R200 */
2815 mem_trcd = (temp & 0x7) + 1;
2816 mem_trp = ((temp >> 8) & 0x7) + 1;
2817 mem_tras = ((temp >> 12) & 0xf) + 4;
2818 }
2819 /* convert to FF */
2820 trcd_ff.full = dfixed_const(mem_trcd);
2821 trp_ff.full = dfixed_const(mem_trp);
2822 tras_ff.full = dfixed_const(mem_tras);
2823
2824 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
2825 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2826 data = (temp & (7 << 20)) >> 20;
2827 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
2828 if (rdev->family == CHIP_RS480) /* don't think rs400 */
2829 tcas_ff = memtcas_rs480_ff[data];
2830 else
2831 tcas_ff = memtcas_ff[data];
2832 } else
2833 tcas_ff = memtcas2_ff[data];
2834
2835 if (rdev->family == CHIP_RS400 ||
2836 rdev->family == CHIP_RS480) {
2837 /* extra cas latency stored in bits 23-25 0-4 clocks */
2838 data = (temp >> 23) & 0x7;
2839 if (data < 5)
2840 tcas_ff.full += dfixed_const(data);
2841 }
2842
2843 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2844 /* on the R300, Tcas is included in Trbs.
2845 */
2846 temp = RREG32(RADEON_MEM_CNTL);
2847 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
2848 if (data == 1) {
2849 if (R300_MEM_USE_CD_CH_ONLY & temp) {
2850 temp = RREG32(R300_MC_IND_INDEX);
2851 temp &= ~R300_MC_IND_ADDR_MASK;
2852 temp |= R300_MC_READ_CNTL_CD_mcind;
2853 WREG32(R300_MC_IND_INDEX, temp);
2854 temp = RREG32(R300_MC_IND_DATA);
2855 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2856 } else {
2857 temp = RREG32(R300_MC_READ_CNTL_AB);
2858 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2859 }
2860 } else {
2861 temp = RREG32(R300_MC_READ_CNTL_AB);
2862 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2863 }
2864 if (rdev->family == CHIP_RV410 ||
2865 rdev->family == CHIP_R420 ||
2866 rdev->family == CHIP_R423)
2867 trbs_ff = memtrbs_r4xx[data];
2868 else
2869 trbs_ff = memtrbs[data];
2870 tcas_ff.full += trbs_ff.full;
2871 }
2872
2873 sclk_eff_ff.full = sclk_ff.full;
2874
2875 if (rdev->flags & RADEON_IS_AGP) {
2876 fixed20_12 agpmode_ff;
2877 agpmode_ff.full = dfixed_const(radeon_agpmode);
2878 temp_ff.full = dfixed_const_666(16);
2879 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
2880 }
2881 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2882
2883 if (ASIC_IS_R300(rdev)) {
2884 sclk_delay_ff.full = dfixed_const(250);
2885 } else {
2886 if ((rdev->family == CHIP_RV100) ||
2887 rdev->flags & RADEON_IS_IGP) {
2888 if (rdev->mc.vram_is_ddr)
2889 sclk_delay_ff.full = dfixed_const(41);
2890 else
2891 sclk_delay_ff.full = dfixed_const(33);
2892 } else {
2893 if (rdev->mc.vram_width == 128)
2894 sclk_delay_ff.full = dfixed_const(57);
2895 else
2896 sclk_delay_ff.full = dfixed_const(41);
2897 }
2898 }
2899
2900 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
2901
2902 if (rdev->mc.vram_is_ddr) {
2903 if (rdev->mc.vram_width == 32) {
2904 k1.full = dfixed_const(40);
2905 c = 3;
2906 } else {
2907 k1.full = dfixed_const(20);
2908 c = 1;
2909 }
2910 } else {
2911 k1.full = dfixed_const(40);
2912 c = 3;
2913 }
2914
2915 temp_ff.full = dfixed_const(2);
2916 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
2917 temp_ff.full = dfixed_const(c);
2918 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
2919 temp_ff.full = dfixed_const(4);
2920 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
2921 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
2922 mc_latency_mclk.full += k1.full;
2923
2924 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
2925 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
2926
2927 /*
2928 HW cursor time assuming worst case of full size colour cursor.
2929 */
2930 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2931 temp_ff.full += trcd_ff.full;
2932 if (temp_ff.full < tras_ff.full)
2933 temp_ff.full = tras_ff.full;
2934 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
2935
2936 temp_ff.full = dfixed_const(cur_size);
2937 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
2938 /*
2939 Find the total latency for the display data.
2940 */
2941 disp_latency_overhead.full = dfixed_const(8);
2942 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
2943 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2944 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2945
2946 if (mc_latency_mclk.full > mc_latency_sclk.full)
2947 disp_latency.full = mc_latency_mclk.full;
2948 else
2949 disp_latency.full = mc_latency_sclk.full;
2950
2951 /* setup Max GRPH_STOP_REQ default value */
2952 if (ASIC_IS_RV100(rdev))
2953 max_stop_req = 0x5c;
2954 else
2955 max_stop_req = 0x7c;
2956
2957 if (mode1) {
2958 /* CRTC1
2959 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2960 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
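For example (illustrative numbers only): a 1280-wide mode at 32 bpp gives
stop_req = 1280 * 4 / 16 = 320, which is then clamped to max_stop_req
(0x5c or 0x7c depending on the ASIC) below.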
2961 */
2962 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2963
2964 if (stop_req > max_stop_req)
2965 stop_req = max_stop_req;
2966
2967 /*
2968 Find the drain rate of the display buffer.
2969 */
2970 temp_ff.full = dfixed_const((16/pixel_bytes1));
2971 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
2972
2973 /*
2974 Find the critical point of the display buffer.
2975 */
2976 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
2977 crit_point_ff.full += dfixed_const_half(0);
2978
2979 critical_point = dfixed_trunc(crit_point_ff);
2980
2981 if (rdev->disp_priority == 2) {
2982 critical_point = 0;
2983 }
2984
2985 /*
2986 The critical point should never be above max_stop_req-4. Setting
2987 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2988 */
2989 if (max_stop_req - critical_point < 4)
2990 critical_point = 0;
2991
2992 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2993 /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
2994 critical_point = 0x10;
2995 }
2996
2997 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2998 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2999 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3000 temp &= ~(RADEON_GRPH_START_REQ_MASK);
3001 if ((rdev->family == CHIP_R350) &&
3002 (stop_req > 0x15)) {
3003 stop_req -= 0x10;
3004 }
3005 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3006 temp |= RADEON_GRPH_BUFFER_SIZE;
3007 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
3008 RADEON_GRPH_CRITICAL_AT_SOF |
3009 RADEON_GRPH_STOP_CNTL);
3010 /*
3011 Write the result into the register.
3012 */
3013 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3014 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3015
3016 #if 0
3017 if ((rdev->family == CHIP_RS400) ||
3018 (rdev->family == CHIP_RS480)) {
3019 /* attempt to program RS400 disp regs correctly ??? */
3020 temp = RREG32(RS400_DISP1_REG_CNTL);
3021 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3022 RS400_DISP1_STOP_REQ_LEVEL_MASK);
3023 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3024 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3025 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3026 temp = RREG32(RS400_DMIF_MEM_CNTL1);
3027 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3028 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3029 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3030 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3031 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3032 }
3033 #endif
3034
3035 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
3036 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
3037 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3038 }
3039
3040 if (mode2) {
3041 u32 grph2_cntl;
3042 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3043
3044 if (stop_req > max_stop_req)
3045 stop_req = max_stop_req;
3046
3047 /*
3048 Find the drain rate of the display buffer.
3049 */
3050 temp_ff.full = dfixed_const((16/pixel_bytes2));
3051 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3052
3053 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3054 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3055 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3056 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3057 if ((rdev->family == CHIP_R350) &&
3058 (stop_req > 0x15)) {
3059 stop_req -= 0x10;
3060 }
3061 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3062 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3063 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
3064 RADEON_GRPH_CRITICAL_AT_SOF |
3065 RADEON_GRPH_STOP_CNTL);
3066
3067 if ((rdev->family == CHIP_RS100) ||
3068 (rdev->family == CHIP_RS200))
3069 critical_point2 = 0;
3070 else {
3071 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3072 temp_ff.full = dfixed_const(temp);
3073 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3074 if (sclk_ff.full < temp_ff.full)
3075 temp_ff.full = sclk_ff.full;
3076
3077 read_return_rate.full = temp_ff.full;
3078
3079 if (mode1) {
3080 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3081 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3082 } else {
3083 time_disp1_drop_priority.full = 0;
3084 }
3085 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3086 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3087 crit_point_ff.full += dfixed_const_half(0);
3088
3089 critical_point2 = dfixed_trunc(crit_point_ff);
3090
3091 if (rdev->disp_priority == 2) {
3092 critical_point2 = 0;
3093 }
3094
3095 if (max_stop_req - critical_point2 < 4)
3096 critical_point2 = 0;
3097
3098 }
3099
3100 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3101 /* some R300 cards have a problem with this set to 0 */
3102 critical_point2 = 0x10;
3103 }
3104
3105 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3106 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3107
3108 if ((rdev->family == CHIP_RS400) ||
3109 (rdev->family == CHIP_RS480)) {
3110 #if 0
3111 /* attempt to program RS400 disp2 regs correctly ??? */
3112 temp = RREG32(RS400_DISP2_REQ_CNTL1);
3113 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3114 RS400_DISP2_STOP_REQ_LEVEL_MASK);
3115 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3116 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3117 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3118 temp = RREG32(RS400_DISP2_REQ_CNTL2);
3119 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3120 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3121 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3122 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3123 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3124 #endif
3125 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3126 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3127 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
3128 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3129 }
3130
3131 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
3132 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3133 }
3134 }
3135
3136 static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
3137 {
3138 DRM_ERROR("pitch %d\n", t->pitch);
3139 DRM_ERROR("use_pitch %d\n", t->use_pitch);
3140 DRM_ERROR("width %d\n", t->width);
3141 DRM_ERROR("width_11 %d\n", t->width_11);
3142 DRM_ERROR("height %d\n", t->height);
3143 DRM_ERROR("height_11 %d\n", t->height_11);
3144 DRM_ERROR("num levels %d\n", t->num_levels);
3145 DRM_ERROR("depth %d\n", t->txdepth);
3146 DRM_ERROR("bpp %d\n", t->cpp);
3147 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
3148 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
3149 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
3150 DRM_ERROR("compress format %d\n", t->compress_format);
3151 }
3152
3153 static int r100_track_compress_size(int compress_format, int w, int h)
3154 {
3155 int block_width, block_height, block_bytes;
3156 int wblocks, hblocks;
3157 int min_wblocks;
3158 int sz;
3159
3160 block_width = 4;
3161 block_height = 4;
3162
3163 switch (compress_format) {
3164 case R100_TRACK_COMP_DXT1:
3165 block_bytes = 8;
3166 min_wblocks = 4;
3167 break;
3168 default:
3169 case R100_TRACK_COMP_DXT35:
3170 block_bytes = 16;
3171 min_wblocks = 2;
3172 break;
3173 }
3174
3175 hblocks = (h + block_height - 1) / block_height;
3176 wblocks = (w + block_width - 1) / block_width;
3177 if (wblocks < min_wblocks)
3178 wblocks = min_wblocks;
3179 sz = wblocks * hblocks * block_bytes;
3180 return sz;
3181 }
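/* Worked examples of the computation above (the sizes follow directly from
 * the block math; the texture dimensions are just illustrative): a 64x64 DXT1
 * texture is 16x16 blocks of 8 bytes = 2048 bytes; a 7x5 DXT3/5 texture is
 * 2x2 blocks (width clamped to min_wblocks) of 16 bytes = 64 bytes. */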
3182
3183 static int r100_cs_track_cube(struct radeon_device *rdev,
3184 struct r100_cs_track *track, unsigned idx)
3185 {
3186 unsigned face, w, h;
3187 struct radeon_bo *cube_robj;
3188 unsigned long size;
3189 unsigned compress_format = track->textures[idx].compress_format;
3190
3191 for (face = 0; face < 5; face++) {
3192 cube_robj = track->textures[idx].cube_info[face].robj;
3193 w = track->textures[idx].cube_info[face].width;
3194 h = track->textures[idx].cube_info[face].height;
3195
3196 if (compress_format) {
3197 size = r100_track_compress_size(compress_format, w, h);
3198 } else
3199 size = w * h;
3200 size *= track->textures[idx].cpp;
3201
3202 size += track->textures[idx].cube_info[face].offset;
3203
3204 if (size > radeon_bo_size(cube_robj)) {
3205 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
3206 size, radeon_bo_size(cube_robj));
3207 r100_cs_track_texture_print(&track->textures[idx]);
3208 return -1;
3209 }
3210 }
3211 return 0;
3212 }
3213
3214 static int r100_cs_track_texture_check(struct radeon_device *rdev,
3215 struct r100_cs_track *track)
3216 {
3217 struct radeon_bo *robj;
3218 unsigned long size;
3219 unsigned u, i, w, h, d;
3220 int ret;
3221
3222 for (u = 0; u < track->num_texture; u++) {
3223 if (!track->textures[u].enabled)
3224 continue;
3225 robj = track->textures[u].robj;
3226 if (robj == NULL) {
3227 DRM_ERROR("No texture bound to unit %u\n", u);
3228 return -EINVAL;
3229 }
3230 size = 0;
3231 for (i = 0; i <= track->textures[u].num_levels; i++) {
3232 if (track->textures[u].use_pitch) {
3233 if (rdev->family < CHIP_R300)
3234 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
3235 else
3236 w = track->textures[u].pitch / (1 << i);
3237 } else {
3238 w = track->textures[u].width;
3239 if (rdev->family >= CHIP_RV515)
3240 w |= track->textures[u].width_11;
3241 w = w / (1 << i);
3242 if (track->textures[u].roundup_w)
3243 w = roundup_pow_of_two(w);
3244 }
3245 h = track->textures[u].height;
3246 if (rdev->family >= CHIP_RV515)
3247 h |= track->textures[u].height_11;
3248 h = h / (1 << i);
3249 if (track->textures[u].roundup_h)
3250 h = roundup_pow_of_two(h);
3251 if (track->textures[u].tex_coord_type == 1) {
3252 d = (1 << track->textures[u].txdepth) / (1 << i);
3253 if (!d)
3254 d = 1;
3255 } else {
3256 d = 1;
3257 }
3258 if (track->textures[u].compress_format) {
3259
3260 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
3261 /* compressed textures are block based */
3262 } else
3263 size += w * h * d;
3264 }
3265 size *= track->textures[u].cpp;
3266
3267 switch (track->textures[u].tex_coord_type) {
3268 case 0:
3269 case 1:
3270 break;
3271 case 2:
3272 if (track->separate_cube) {
3273 ret = r100_cs_track_cube(rdev, track, u);
3274 if (ret)
3275 return ret;
3276 } else
3277 size *= 6;
3278 break;
3279 default:
3280 DRM_ERROR("Invalid texture coordinate type %u for unit "
3281 "%u\n", track->textures[u].tex_coord_type, u);
3282 return -EINVAL;
3283 }
3284 if (size > radeon_bo_size(robj)) {
3285 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
3286 "%lu\n", u, size, radeon_bo_size(robj));
3287 r100_cs_track_texture_print(&track->textures[u]);
3288 return -EINVAL;
3289 }
3290 }
3291 return 0;
3292 }
3293
3294 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3295 {
3296 unsigned i;
3297 unsigned long size;
3298 unsigned prim_walk;
3299 unsigned nverts;
3300 unsigned num_cb = track->num_cb;
3301
3302 if (!track->zb_cb_clear && !track->color_channel_mask &&
3303 !track->blend_read_enable)
3304 num_cb = 0;
3305
3306 for (i = 0; i < num_cb; i++) {
3307 if (track->cb[i].robj == NULL) {
3308 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3309 return -EINVAL;
3310 }
3311 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
3312 size += track->cb[i].offset;
3313 if (size > radeon_bo_size(track->cb[i].robj)) {
3314 DRM_ERROR("[drm] Buffer too small for color buffer %d "
3315 "(need %lu have %lu) !\n", i, size,
3316 radeon_bo_size(track->cb[i].robj));
3317 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
3318 i, track->cb[i].pitch, track->cb[i].cpp,
3319 track->cb[i].offset, track->maxy);
3320 return -EINVAL;
3321 }
3322 }
3323 if (track->z_enabled) {
3324 if (track->zb.robj == NULL) {
3325 DRM_ERROR("[drm] No buffer for z buffer !\n");
3326 return -EINVAL;
3327 }
3328 size = track->zb.pitch * track->zb.cpp * track->maxy;
3329 size += track->zb.offset;
3330 if (size > radeon_bo_size(track->zb.robj)) {
3331 DRM_ERROR("[drm] Buffer too small for z buffer "
3332 "(need %lu have %lu) !\n", size,
3333 radeon_bo_size(track->zb.robj));
3334 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
3335 track->zb.pitch, track->zb.cpp,
3336 track->zb.offset, track->maxy);
3337 return -EINVAL;
3338 }
3339 }
3340 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3341 if (track->vap_vf_cntl & (1 << 14)) {
3342 nverts = track->vap_alt_nverts;
3343 } else {
3344 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
3345 }
3346 switch (prim_walk) {
3347 case 1:
3348 for (i = 0; i < track->num_arrays; i++) {
3349 size = track->arrays[i].esize * track->max_indx * 4;
3350 if (track->arrays[i].robj == NULL) {
3351 DRM_ERROR("(PW %u) Vertex array %u no buffer "
3352 "bound\n", prim_walk, i);
3353 return -EINVAL;
3354 }
3355 if (size > radeon_bo_size(track->arrays[i].robj)) {
3356 dev_err(rdev->dev, "(PW %u) Vertex array %u "
3357 "need %lu dwords have %lu dwords\n",
3358 prim_walk, i, size >> 2,
3359 radeon_bo_size(track->arrays[i].robj)
3360 >> 2);
3361 DRM_ERROR("Max indices %u\n", track->max_indx);
3362 return -EINVAL;
3363 }
3364 }
3365 break;
3366 case 2:
3367 for (i = 0; i < track->num_arrays; i++) {
3368 size = track->arrays[i].esize * (nverts - 1) * 4;
3369 if (track->arrays[i].robj == NULL) {
3370 DRM_ERROR("(PW %u) Vertex array %u no buffer "
3371 "bound\n", prim_walk, i);
3372 return -EINVAL;
3373 }
3374 if (size > radeon_bo_size(track->arrays[i].robj)) {
3375 dev_err(rdev->dev, "(PW %u) Vertex array %u "
3376 "need %lu dwords have %lu dwords\n",
3377 prim_walk, i, size >> 2,
3378 radeon_bo_size(track->arrays[i].robj)
3379 >> 2);
3380 return -EINVAL;
3381 }
3382 }
3383 break;
3384 case 3:
3385 size = track->vtx_size * nverts;
3386 if (size != track->immd_dwords) {
3387 DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
3388 track->immd_dwords, size);
3389 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
3390 nverts, track->vtx_size);
3391 return -EINVAL;
3392 }
3393 break;
3394 default:
3395 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
3396 prim_walk);
3397 return -EINVAL;
3398 }
3399 return r100_cs_track_texture_check(rdev, track);
3400 }
3401
3402 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
3403 {
3404 unsigned i, face;
3405
3406 if (rdev->family < CHIP_R300) {
3407 track->num_cb = 1;
3408 if (rdev->family <= CHIP_RS200)
3409 track->num_texture = 3;
3410 else
3411 track->num_texture = 6;
3412 track->maxy = 2048;
3413 track->separate_cube = 1;
3414 } else {
3415 track->num_cb = 4;
3416 track->num_texture = 16;
3417 track->maxy = 4096;
3418 track->separate_cube = 0;
3419 }
3420
3421 for (i = 0; i < track->num_cb; i++) {
3422 track->cb[i].robj = NULL;
3423 track->cb[i].pitch = 8192;
3424 track->cb[i].cpp = 16;
3425 track->cb[i].offset = 0;
3426 }
3427 track->z_enabled = true;
3428 track->zb.robj = NULL;
3429 track->zb.pitch = 8192;
3430 track->zb.cpp = 4;
3431 track->zb.offset = 0;
3432 track->vtx_size = 0x7F;
3433 track->immd_dwords = 0xFFFFFFFFUL;
3434 track->num_arrays = 11;
3435 track->max_indx = 0x00FFFFFFUL;
3436 for (i = 0; i < track->num_arrays; i++) {
3437 track->arrays[i].robj = NULL;
3438 track->arrays[i].esize = 0x7F;
3439 }
3440 for (i = 0; i < track->num_texture; i++) {
3441 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
3442 track->textures[i].pitch = 16536;
3443 track->textures[i].width = 16536;
3444 track->textures[i].height = 16536;
3445 track->textures[i].width_11 = 1 << 11;
3446 track->textures[i].height_11 = 1 << 11;
3447 track->textures[i].num_levels = 12;
3448 if (rdev->family <= CHIP_RS200) {
3449 track->textures[i].tex_coord_type = 0;
3450 track->textures[i].txdepth = 0;
3451 } else {
3452 track->textures[i].txdepth = 16;
3453 track->textures[i].tex_coord_type = 1;
3454 }
3455 track->textures[i].cpp = 64;
3456 track->textures[i].robj = NULL;
3457 /* CS IB emission code makes sure texture units are disabled */
3458 track->textures[i].enabled = false;
3459 track->textures[i].roundup_w = true;
3460 track->textures[i].roundup_h = true;
3461 if (track->separate_cube)
3462 for (face = 0; face < 5; face++) {
3463 track->textures[i].cube_info[face].robj = NULL;
3464 track->textures[i].cube_info[face].width = 16536;
3465 track->textures[i].cube_info[face].height = 16536;
3466 track->textures[i].cube_info[face].offset = 0;
3467 }
3468 }
3469 }
3470
3471 int r100_ring_test(struct radeon_device *rdev)
3472 {
3473 uint32_t scratch;
3474 uint32_t tmp = 0;
3475 unsigned i;
3476 int r;
3477
3478 r = radeon_scratch_get(rdev, &scratch);
3479 if (r) {
3480 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3481 return r;
3482 }
3483 WREG32(scratch, 0xCAFEDEAD);
3484 r = radeon_ring_lock(rdev, 2);
3485 if (r) {
3486 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3487 radeon_scratch_free(rdev, scratch);
3488 return r;
3489 }
3490 radeon_ring_write(rdev, PACKET0(scratch, 0));
3491 radeon_ring_write(rdev, 0xDEADBEEF);
3492 radeon_ring_unlock_commit(rdev);
3493 for (i = 0; i < rdev->usec_timeout; i++) {
3494 tmp = RREG32(scratch);
3495 if (tmp == 0xDEADBEEF) {
3496 break;
3497 }
3498 DRM_UDELAY(1);
3499 }
3500 if (i < rdev->usec_timeout) {
3501 DRM_INFO("ring test succeeded in %d usecs\n", i);
3502 } else {
3503 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3504 scratch, tmp);
3505 r = -EINVAL;
3506 }
3507 radeon_scratch_free(rdev, scratch);
3508 return r;
3509 }
3510
3511 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3512 {
3513 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
3514 radeon_ring_write(rdev, ib->gpu_addr);
3515 radeon_ring_write(rdev, ib->length_dw);
3516 }
3517
3518 int r100_ib_test(struct radeon_device *rdev)
3519 {
3520 struct radeon_ib *ib;
3521 uint32_t scratch;
3522 uint32_t tmp = 0;
3523 unsigned i;
3524 int r;
3525
3526 r = radeon_scratch_get(rdev, &scratch);
3527 if (r) {
3528 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3529 return r;
3530 }
3531 WREG32(scratch, 0xCAFEDEAD);
3532 r = radeon_ib_get(rdev, &ib);
3533 if (r) {
3534 return r;
3535 }
3536 ib->ptr[0] = PACKET0(scratch, 0);
3537 ib->ptr[1] = 0xDEADBEEF;
3538 ib->ptr[2] = PACKET2(0);
3539 ib->ptr[3] = PACKET2(0);
3540 ib->ptr[4] = PACKET2(0);
3541 ib->ptr[5] = PACKET2(0);
3542 ib->ptr[6] = PACKET2(0);
3543 ib->ptr[7] = PACKET2(0);
3544 ib->length_dw = 8;
3545 r = radeon_ib_schedule(rdev, ib);
3546 if (r) {
3547 radeon_scratch_free(rdev, scratch);
3548 radeon_ib_free(rdev, &ib);
3549 return r;
3550 }
3551 r = radeon_fence_wait(ib->fence, false);
3552 if (r) {
3553 return r;
3554 }
3555 for (i = 0; i < rdev->usec_timeout; i++) {
3556 tmp = RREG32(scratch);
3557 if (tmp == 0xDEADBEEF) {
3558 break;
3559 }
3560 DRM_UDELAY(1);
3561 }
3562 if (i < rdev->usec_timeout) {
3563 DRM_INFO("ib test succeeded in %u usecs\n", i);
3564 } else {
3565 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3566 scratch, tmp);
3567 r = -EINVAL;
3568 }
3569 radeon_scratch_free(rdev, scratch);
3570 radeon_ib_free(rdev, &ib);
3571 return r;
3572 }
3573
3574 void r100_ib_fini(struct radeon_device *rdev)
3575 {
3576 radeon_ib_pool_fini(rdev);
3577 }
3578
3579 int r100_ib_init(struct radeon_device *rdev)
3580 {
3581 int r;
3582
3583 r = radeon_ib_pool_init(rdev);
3584 if (r) {
3585 dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
3586 r100_ib_fini(rdev);
3587 return r;
3588 }
3589 r = r100_ib_test(rdev);
3590 if (r) {
3591 dev_err(rdev->dev, "failed testing IB (%d).\n", r);
3592 r100_ib_fini(rdev);
3593 return r;
3594 }
3595 return 0;
3596 }
3597
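/* Quiesce MC clients before the memory controller is reprogrammed: stop the
 * CP, save the VGA/CRTC state that r100_mc_resume() will restore, and shut
 * off VGA aperture access, cursors, the overlay and CRTC display requests.
 */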
3598 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3599 {
3600 /* Shut down the CP; we shouldn't need to, but better safe than
3601 * sorry.
3602 */
3603 rdev->cp.ready = false;
3604 WREG32(R_000740_CP_CSQ_CNTL, 0);
3605
3606 /* Save a few CRTC registers */
3607 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3608 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3609 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3610 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3611 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3612 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3613 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3614 }
3615
3616 /* Disable VGA aperture access */
3617 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3618 /* Disable cursor, overlay, crtc */
3619 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3620 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3621 S_000054_CRTC_DISPLAY_DIS(1));
3622 WREG32(R_000050_CRTC_GEN_CNTL,
3623 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3624 S_000050_CRTC_DISP_REQ_EN_B(1));
3625 WREG32(R_000420_OV0_SCALE_CNTL,
3626 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3627 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3628 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3629 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3630 S_000360_CUR2_LOCK(1));
3631 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3632 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3633 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3634 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3635 WREG32(R_000360_CUR2_OFFSET,
3636 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3637 }
3638 }
3639
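/* Re-point the CRTC(s) at the (possibly relocated) VRAM base and restore the
 * state saved by r100_mc_stop().
 */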
3640 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3641 {
3642 /* Update base address for crtc */
3643 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3644 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3645 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3646 }
3647 /* Restore CRTC registers */
3648 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3649 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3650 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3651 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3652 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3653 }
3654 }
3655
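/* Clear VGA_RAM_EN in GENMO_WT so legacy VGA stops accessing the framebuffer. */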
3656 void r100_vga_render_disable(struct radeon_device *rdev)
3657 {
3658 u32 tmp;
3659
3660 tmp = RREG8(R_0003C2_GENMO_WT);
3661 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3662 }
3663
3664 static void r100_debugfs(struct radeon_device *rdev)
3665 {
3666 int r;
3667
3668 r = r100_debugfs_mc_info_init(rdev);
3669 if (r)
3670 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3671 }
3672
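/* Program the MC apertures with all MC clients stopped: the AGP aperture and
 * base (or safe defaults on non-AGP boards), then, once the MC is idle, the
 * framebuffer location. Start/top addresses are programmed in 64KB units.
 */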
3673 static void r100_mc_program(struct radeon_device *rdev)
3674 {
3675 struct r100_mc_save save;
3676
3677 /* Stop all mc clients */
3678 r100_mc_stop(rdev, &save);
3679 if (rdev->flags & RADEON_IS_AGP) {
3680 WREG32(R_00014C_MC_AGP_LOCATION,
3681 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3682 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3683 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3684 if (rdev->family > CHIP_RV200)
3685 WREG32(R_00015C_AGP_BASE_2,
3686 upper_32_bits(rdev->mc.agp_base) & 0xff);
3687 } else {
3688 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3689 WREG32(R_000170_AGP_BASE, 0);
3690 if (rdev->family > CHIP_RV200)
3691 WREG32(R_00015C_AGP_BASE_2, 0);
3692 }
3693 /* Wait for mc idle */
3694 if (r100_mc_wait_for_idle(rdev))
3695 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3696 /* Program MC; it should be a 32-bit limited address space */
3697 WREG32(R_000148_MC_FB_LOCATION,
3698 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3699 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3700 r100_mc_resume(rdev, &save);
3701 }
3702
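/* Enable dynamic clock gating when the radeon_dynclks parameter asks for it,
 * and force the CP and VIP clocks on (plus both display blocks on
 * RV250/RV280).
 */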
3703 void r100_clock_startup(struct radeon_device *rdev)
3704 {
3705 u32 tmp;
3706
3707 if (radeon_dynclks != -1 && radeon_dynclks)
3708 radeon_legacy_set_clock_gating(rdev, 1);
3709 /* We need to force some of the blocks on */
3710 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3711 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3712 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3713 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3714 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3715 }
3716
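/* Bring the ASIC up: common registers, MC, clocks, bus mastering, PCI GART
 * (when present), IRQs, a 1MB CP ring, writeback and the IB pool, in that
 * order.
 */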
3717 static int r100_startup(struct radeon_device *rdev)
3718 {
3719 int r;
3720
3721 /* set common regs */
3722 r100_set_common_regs(rdev);
3723 /* program mc */
3724 r100_mc_program(rdev);
3725 /* Resume clock */
3726 r100_clock_startup(rdev);
3727 /* Initialize GPU configuration (# pipes, ...) */
3728 // r100_gpu_init(rdev);
3729 /* Initialize GART (initialize after TTM so we can allocate
3730 * memory through TTM but finalize after TTM) */
3731 r100_enable_bm(rdev);
3732 if (rdev->flags & RADEON_IS_PCI) {
3733 r = r100_pci_gart_enable(rdev);
3734 if (r)
3735 return r;
3736 }
3737 /* Enable IRQ */
3738 r100_irq_set(rdev);
3739 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3740 /* 1M ring buffer */
3741 r = r100_cp_init(rdev, 1024 * 1024);
3742 if (r) {
3743 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3744 return r;
3745 }
3746 r = r100_wb_init(rdev);
3747 if (r)
3748 dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
3749 r = r100_ib_init(rdev);
3750 if (r) {
3751 dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
3752 return r;
3753 }
3754 return 0;
3755 }
3756
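/* Resume path: make sure the GART is off, reset the GPU, re-post it through
 * the combios tables, re-initialize surface registers and run the normal
 * startup sequence.
 */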
3757 int r100_resume(struct radeon_device *rdev)
3758 {
3759 /* Make sure the GART is not working */
3760 if (rdev->flags & RADEON_IS_PCI)
3761 r100_pci_gart_disable(rdev);
3762 /* Resume clock before doing reset */
3763 r100_clock_startup(rdev);
3764 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3765 if (radeon_asic_reset(rdev)) {
3766 dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3767 RREG32(R_000E40_RBBM_STATUS),
3768 RREG32(R_0007C0_CP_STAT));
3769 }
3770 /* post */
3771 radeon_combios_asic_init(rdev->ddev);
3772 /* Resume clock after posting */
3773 r100_clock_startup(rdev);
3774 /* Initialize surface registers */
3775 radeon_surface_init(rdev);
3776 return r100_startup(rdev);
3777 }
3778
3779 int r100_suspend(struct radeon_device *rdev)
3780 {
3781 r100_cp_disable(rdev);
3782 r100_wb_disable(rdev);
3783 r100_irq_disable(rdev);
3784 if (rdev->flags & RADEON_IS_PCI)
3785 r100_pci_gart_disable(rdev);
3786 return 0;
3787 }
3788
3789 void r100_fini(struct radeon_device *rdev)
3790 {
3791 r100_cp_fini(rdev);
3792 r100_wb_fini(rdev);
3793 r100_ib_fini(rdev);
3794 radeon_gem_fini(rdev);
3795 if (rdev->flags & RADEON_IS_PCI)
3796 r100_pci_gart_fini(rdev);
3797 radeon_agp_fini(rdev);
3798 radeon_irq_kms_fini(rdev);
3799 radeon_fence_driver_fini(rdev);
3800 radeon_bo_fini(rdev);
3801 radeon_atombios_fini(rdev);
3802 kfree(rdev->bios);
3803 rdev->bios = NULL;
3804 }
3805
3806 /*
3807 * Due to how kexec works, it can leave the hw fully initialised when it
3808 * boots the new kernel. However, doing our init sequence with the CP and
3809 * WB stuff set up causes GPU hangs on the RN50 at least. So at startup
3810 * do some quick sanity checks and restore sane values to avoid this
3811 * problem.
3812 */
3813 void r100_restore_sanity(struct radeon_device *rdev)
3814 {
3815 u32 tmp;
3816
3817 tmp = RREG32(RADEON_CP_CSQ_CNTL);
3818 if (tmp) {
3819 WREG32(RADEON_CP_CSQ_CNTL, 0);
3820 }
3821 tmp = RREG32(RADEON_CP_RB_CNTL);
3822 if (tmp) {
3823 WREG32(RADEON_CP_RB_CNTL, 0);
3824 }
3825 tmp = RREG32(RADEON_SCRATCH_UMSK);
3826 if (tmp) {
3827 WREG32(RADEON_SCRATCH_UMSK, 0);
3828 }
3829 }
3830
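/* One-time init: debugfs, VGA off, scratch/surface registers, sanity restore,
 * BIOS (combios only), GPU reset and POST check, errata, clocks, AGP, VRAM
 * sizing, then the fence/IRQ/BO managers, PCI GART, the safe-register list
 * and finally the accel startup path.
 */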
3831 int r100_init(struct radeon_device *rdev)
3832 {
3833 int r;
3834
3835 /* Register debugfs file specific to this group of asics */
3836 r100_debugfs(rdev);
3837 /* Disable VGA */
3838 r100_vga_render_disable(rdev);
3839 /* Initialize scratch registers */
3840 radeon_scratch_init(rdev);
3841 /* Initialize surface registers */
3842 radeon_surface_init(rdev);
3843 /* sanity check some registers to avoid hangs like after kexec */
3844 r100_restore_sanity(rdev);
3845 /* TODO: disable VGA need to use VGA request */
3846 /* BIOS */
3847 if (!radeon_get_bios(rdev)) {
3848 if (ASIC_IS_AVIVO(rdev))
3849 return -EINVAL;
3850 }
3851 if (rdev->is_atom_bios) {
3852 dev_err(rdev->dev, "Expecting combios for R100-family GPU\n");
3853 return -EINVAL;
3854 } else {
3855 r = radeon_combios_init(rdev);
3856 if (r)
3857 return r;
3858 }
3859 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3860 if (radeon_asic_reset(rdev)) {
3861 dev_warn(rdev->dev,
3862 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3863 RREG32(R_000E40_RBBM_STATUS),
3864 RREG32(R_0007C0_CP_STAT));
3865 }
3866 /* check if cards are posted or not */
3867 if (radeon_boot_test_post_card(rdev) == false)
3868 return -EINVAL;
3869 /* Set asic errata */
3870 r100_errata(rdev);
3871 /* Initialize clocks */
3872 radeon_get_clock_info(rdev->ddev);
3873 /* initialize AGP */
3874 if (rdev->flags & RADEON_IS_AGP) {
3875 r = radeon_agp_init(rdev);
3876 if (r) {
3877 radeon_agp_disable(rdev);
3878 }
3879 }
3880 /* initialize VRAM */
3881 r100_mc_init(rdev);
3882 /* Fence driver */
3883 r = radeon_fence_driver_init(rdev);
3884 if (r)
3885 return r;
3886 r = radeon_irq_kms_init(rdev);
3887 if (r)
3888 return r;
3889 /* Memory manager */
3890 r = radeon_bo_init(rdev);
3891 if (r)
3892 return r;
3893 if (rdev->flags & RADEON_IS_PCI) {
3894 r = r100_pci_gart_init(rdev);
3895 if (r)
3896 return r;
3897 }
3898 r100_set_safe_registers(rdev);
3899 rdev->accel_working = true;
3900 r = r100_startup(rdev);
3901 if (r) {
3902 /* Something went wrong with the accel init; stop accel */
3903 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3904 r100_cp_fini(rdev);
3905 r100_wb_fini(rdev);
3906 r100_ib_fini(rdev);
3907 radeon_irq_kms_fini(rdev);
3908 if (rdev->flags & RADEON_IS_PCI)
3909 r100_pci_gart_fini(rdev);
3910 rdev->accel_working = false;
3911 }
3912 return 0;
3913 }