/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

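/* per-CRTC register block offsets: entry 0 is the D1 CRTC, entry 1 the
 * register bank stride from the D1 to the D2 CRTC block
 */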
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	return 0;
}

void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
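
/*
 * Editorial note, not from the original source: ASIC_T is treated above as
 * a 9-bit two's-complement value, so the mask/subtract pair is a manual
 * sign extension. Assuming the field really is 9 bits wide, the kernel's
 * sign_extend32() helper from <linux/bitops.h> expresses the same thing
 * with the sign bit at index 8:
 *
 *	int actual_temp = sign_extend32(temp, 8);
 */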

void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use a power state flagged NO_DISPLAY while crtcs are active */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use a power state flagged NO_DISPLAY while crtcs are active */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
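
/*
 * Illustrative sketch, not part of the driver: the per-pad switch above
 * repeats the same read-modify-write on a different DC_HPDx_INT_CONTROL
 * register. Assuming RADEON_HPD_1 == 0 in enum radeon_hpd_id, the DCE3
 * half could equivalently be table-driven:
 */
#if 0	/* example only */
static const u32 dce3_hpd_int_control[] = {
	DC_HPD1_INT_CONTROL, DC_HPD2_INT_CONTROL, DC_HPD3_INT_CONTROL,
	DC_HPD4_INT_CONTROL, DC_HPD5_INT_CONTROL, DC_HPD6_INT_CONTROL,
};

static void dce3_hpd_set_polarity(struct radeon_device *rdev,
				  enum radeon_hpd_id hpd, bool connected)
{
	u32 tmp;

	if (hpd >= ARRAY_SIZE(dce3_hpd_int_control))
		return;
	tmp = RREG32(dce3_hpd_int_control[hpd]);
	if (connected)
		tmp &= ~DC_HPDx_INT_POLARITY;
	else
		tmp |= DC_HPDx_INT_POLARITY;
	WREG32(dce3_hpd_int_control[hpd], tmp);
}
#endif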

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; this avoids
			 * breaking the aux dp channel on iMacs and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
			/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
			/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * The workaround seems to cause problems on some AGP cards,
		 * so just use the old method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll the invalidation request response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
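
/*
 * Editorial note, not from the original source: the table_size computed
 * above reserves 8 bytes per GPU page, i.e. one 64-bit page table entry
 * for each mapped page of the GTT.
 */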

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
			   EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll the MC busy bits in SRBM_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
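
/*
 * Illustrative sketch, not driver API: busy-waits like the one above all
 * follow the same pattern -- poll a status register once per microsecond
 * for up to rdev->usec_timeout iterations. A hypothetical generic helper
 * capturing that pattern:
 */
#if 0	/* example only */
static int r600_poll_until_clear(struct radeon_device *rdev, u32 reg, u32 mask)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!(RREG32(reg) & mask))
			return 0;	/* idle */
		udelay(1);
	}
	return -1;	/* timed out */
}
#endif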

uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
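
/*
 * Illustrative sketch, not part of the driver: the two accessors above are
 * a classic index/data register pair -- write the target register number
 * to MC_INDEX, then move the payload through MC_DATA, all under
 * mc_idx_lock. A hypothetical read-modify-write helper built on them:
 */
#if 0	/* example only */
static void rs780_mc_rmw(struct radeon_device *rdev, uint32_t reg,
			 uint32_t clr, uint32_t set)
{
	uint32_t tmp = rs780_mc_rreg(rdev, reg);

	tmp = (tmp & ~clr) | set;
	rs780_mc_wreg(rdev, reg, tmp);
}
#endif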

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place VRAM at the same place as in the CPU (PCI)
 * address space, as some GPUs seem to have issues when we reprogram it at
 * a different address space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture then we limit the VRAM size to the aperture.
 *
 * If we are using AGP then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
 *
 * This function will never fail; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
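
/*
 * Worked example (editorial, hypothetical numbers): with a 256M AGP
 * aperture at gtt_start = 0xD0000000 (gtt_end = 0xDFFFFFFF) and
 * mc_mask = 0xFFFFFFFF, size_bf = 0xD0000000 and size_af = 0x20000000.
 * Since size_bf > size_af, VRAM is placed below the aperture so that
 * vram_end + 1 == gtt_start, keeping both ranges contiguous for the GPU.
 */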

static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB is only used with UMA memory, so it
				 * is simply disabled here when sideport
				 * memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						 (unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}

int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
			   (void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}

static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
		 RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
		 RREG32(CP_STAT));
	dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
		 RREG32(DMA_STATUS_REG));
}

static bool r600_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[2];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1545 }
1546
1547 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1548 if (rdev->family >= CHIP_RV770)
1549 grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1550 S_008020_SOFT_RESET_CB(1) |
1551 S_008020_SOFT_RESET_PA(1) |
1552 S_008020_SOFT_RESET_SC(1) |
1553 S_008020_SOFT_RESET_SPI(1) |
1554 S_008020_SOFT_RESET_SX(1) |
1555 S_008020_SOFT_RESET_SH(1) |
1556 S_008020_SOFT_RESET_TC(1) |
1557 S_008020_SOFT_RESET_TA(1) |
1558 S_008020_SOFT_RESET_VC(1) |
1559 S_008020_SOFT_RESET_VGT(1);
1560 else
1561 grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
1562 S_008020_SOFT_RESET_DB(1) |
1563 S_008020_SOFT_RESET_CB(1) |
1564 S_008020_SOFT_RESET_PA(1) |
1565 S_008020_SOFT_RESET_SC(1) |
1566 S_008020_SOFT_RESET_SMX(1) |
1567 S_008020_SOFT_RESET_SPI(1) |
1568 S_008020_SOFT_RESET_SX(1) |
1569 S_008020_SOFT_RESET_SH(1) |
1570 S_008020_SOFT_RESET_TC(1) |
1571 S_008020_SOFT_RESET_TA(1) |
1572 S_008020_SOFT_RESET_VC(1) |
1573 S_008020_SOFT_RESET_VGT(1);
1574 }
1575
1576 if (reset_mask & RADEON_RESET_CP) {
1577 grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1578 S_008020_SOFT_RESET_VGT(1);
1579
1580 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1581 }
1582
1583 if (reset_mask & RADEON_RESET_DMA) {
1584 if (rdev->family >= CHIP_RV770)
1585 srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1586 else
1587 srbm_soft_reset |= SOFT_RESET_DMA;
1588 }
1589
1590 if (reset_mask & RADEON_RESET_RLC)
1591 srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1592
1593 if (reset_mask & RADEON_RESET_SEM)
1594 srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1595
1596 if (reset_mask & RADEON_RESET_IH)
1597 srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1598
1599 if (reset_mask & RADEON_RESET_GRBM)
1600 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1601
1602 if (!(rdev->flags & RADEON_IS_IGP)) {
1603 if (reset_mask & RADEON_RESET_MC)
1604 srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1605 }
1606
1607 if (reset_mask & RADEON_RESET_VMC)
1608 srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1609
1610 if (grbm_soft_reset) {
1611 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1612 tmp |= grbm_soft_reset;
1613 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1614 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1615 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1616
1617 udelay(50);
1618
1619 tmp &= ~grbm_soft_reset;
1620 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1621 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1622 }
1623
1624 if (srbm_soft_reset) {
1625 tmp = RREG32(SRBM_SOFT_RESET);
1626 tmp |= srbm_soft_reset;
1627 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1628 WREG32(SRBM_SOFT_RESET, tmp);
1629 tmp = RREG32(SRBM_SOFT_RESET);
1630
1631 udelay(50);
1632
1633 tmp &= ~srbm_soft_reset;
1634 WREG32(SRBM_SOFT_RESET, tmp);
1635 tmp = RREG32(SRBM_SOFT_RESET);
1636 }
1637
1638 /* Wait a little for things to settle down */
1639 mdelay(1);
1640
1641 rv515_mc_resume(rdev, &save);
1642 udelay(50);
1643
1644 r600_print_gpu_status_regs(rdev);
1645 }
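/* Illustrative note (not part of the original file): both reset blocks
 * above follow the same assert/deassert pattern: OR the soft-reset bits
 * in, read the register back, hold reset for ~50us, clear the bits, and
 * read back once more. The throwaway readbacks matter because MMIO
 * writes are posted; without them the udelay() could elapse before the
 * reset bits have actually reached the hardware.
 */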
1646
1647 int r600_asic_reset(struct radeon_device *rdev)
1648 {
1649 u32 reset_mask;
1650
1651 reset_mask = r600_gpu_check_soft_reset(rdev);
1652
1653 if (reset_mask)
1654 r600_set_bios_scratch_engine_hung(rdev, true);
1655
1656 r600_gpu_soft_reset(rdev, reset_mask);
1657
1658 reset_mask = r600_gpu_check_soft_reset(rdev);
1659
1660 if (!reset_mask)
1661 r600_set_bios_scratch_engine_hung(rdev, false);
1662
1663 return 0;
1664 }
1665
1666 /**
1667 * r600_gfx_is_lockup - Check if the GFX engine is locked up
1668 *
1669 * @rdev: radeon_device pointer
1670 * @ring: radeon_ring structure holding ring information
1671 *
1672 * Check if the GFX engine is locked up.
1673 * Returns true if the engine appears to be locked up, false if not.
1674 */
1675 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1676 {
1677 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1678
1679 if (!(reset_mask & (RADEON_RESET_GFX |
1680 RADEON_RESET_COMPUTE |
1681 RADEON_RESET_CP))) {
1682 radeon_ring_lockup_update(ring);
1683 return false;
1684 }
1685 /* force CP activities */
1686 radeon_ring_force_activity(rdev, ring);
1687 return radeon_ring_test_lockup(rdev, ring);
1688 }
1689
1690 u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1691 u32 tiling_pipe_num,
1692 u32 max_rb_num,
1693 u32 total_max_rb_num,
1694 u32 disabled_rb_mask)
1695 {
1696 u32 rendering_pipe_num, rb_num_width, req_rb_num;
1697 u32 pipe_rb_ratio, pipe_rb_remain, tmp;
1698 u32 data = 0, mask = 1 << (max_rb_num - 1);
1699 unsigned i, j;
1700
1701 /* mask out the RBs that don't exist on that asic */
1702 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1703 /* make sure at least one RB is available */
1704 if ((tmp & 0xff) != 0xff)
1705 disabled_rb_mask = tmp;
1706
1707 rendering_pipe_num = 1 << tiling_pipe_num;
1708 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1709 BUG_ON(rendering_pipe_num < req_rb_num);
1710
1711 pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1712 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1713
1714 if (rdev->family <= CHIP_RV740) {
1715 /* r6xx/r7xx */
1716 rb_num_width = 2;
1717 } else {
1718 /* eg+ */
1719 rb_num_width = 4;
1720 }
1721
1722 for (i = 0; i < max_rb_num; i++) {
1723 if (!(mask & disabled_rb_mask)) {
1724 for (j = 0; j < pipe_rb_ratio; j++) {
1725 data <<= rb_num_width;
1726 data |= max_rb_num - i - 1;
1727 }
1728 if (pipe_rb_remain) {
1729 data <<= rb_num_width;
1730 data |= max_rb_num - i - 1;
1731 pipe_rb_remain--;
1732 }
1733 }
1734 mask >>= 1;
1735 }
1736
1737 return data;
1738 }
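/* Illustrative worked example (not part of the original file), tracing
 * the remap above with assumed inputs tiling_pipe_num = 3 (8 rendering
 * pipes), max_rb_num = 4, total_max_rb_num = 8 and disabled_rb_mask =
 * 0x4 (RB2 fused off):
 *
 *   disabled_rb_mask |= 0xf0 (nonexistent RBs 4-7) -> 0xf4, 5 bits set
 *   req_rb_num    = 8 - 5 = 3 usable RBs (RB3, RB1, RB0)
 *   pipe_rb_ratio = 8 / 3 = 2, pipe_rb_remain = 2
 *
 * Walking the RBs from high to low, each enabled RB gets 2 slots plus
 * one extra while pipe_rb_remain > 0, so the 2-bit backend-map fields
 * come out as 3,3,3,1,1,1,0,0, i.e. data = 0xfd50: all 8 pipes are
 * spread evenly over the 3 surviving render backends.
 */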
1739
1740 int r600_count_pipe_bits(uint32_t val)
1741 {
1742 return hweight32(val);
1743 }
1744
1745 static void r600_gpu_init(struct radeon_device *rdev)
1746 {
1747 u32 tiling_config;
1748 u32 ramcfg;
1749 u32 cc_rb_backend_disable;
1750 u32 cc_gc_shader_pipe_config;
1751 u32 tmp;
1752 int i, j;
1753 u32 sq_config;
1754 u32 sq_gpr_resource_mgmt_1 = 0;
1755 u32 sq_gpr_resource_mgmt_2 = 0;
1756 u32 sq_thread_resource_mgmt = 0;
1757 u32 sq_stack_resource_mgmt_1 = 0;
1758 u32 sq_stack_resource_mgmt_2 = 0;
1759 u32 disabled_rb_mask;
1760
1761 rdev->config.r600.tiling_group_size = 256;
1762 switch (rdev->family) {
1763 case CHIP_R600:
1764 rdev->config.r600.max_pipes = 4;
1765 rdev->config.r600.max_tile_pipes = 8;
1766 rdev->config.r600.max_simds = 4;
1767 rdev->config.r600.max_backends = 4;
1768 rdev->config.r600.max_gprs = 256;
1769 rdev->config.r600.max_threads = 192;
1770 rdev->config.r600.max_stack_entries = 256;
1771 rdev->config.r600.max_hw_contexts = 8;
1772 rdev->config.r600.max_gs_threads = 16;
1773 rdev->config.r600.sx_max_export_size = 128;
1774 rdev->config.r600.sx_max_export_pos_size = 16;
1775 rdev->config.r600.sx_max_export_smx_size = 128;
1776 rdev->config.r600.sq_num_cf_insts = 2;
1777 break;
1778 case CHIP_RV630:
1779 case CHIP_RV635:
1780 rdev->config.r600.max_pipes = 2;
1781 rdev->config.r600.max_tile_pipes = 2;
1782 rdev->config.r600.max_simds = 3;
1783 rdev->config.r600.max_backends = 1;
1784 rdev->config.r600.max_gprs = 128;
1785 rdev->config.r600.max_threads = 192;
1786 rdev->config.r600.max_stack_entries = 128;
1787 rdev->config.r600.max_hw_contexts = 8;
1788 rdev->config.r600.max_gs_threads = 4;
1789 rdev->config.r600.sx_max_export_size = 128;
1790 rdev->config.r600.sx_max_export_pos_size = 16;
1791 rdev->config.r600.sx_max_export_smx_size = 128;
1792 rdev->config.r600.sq_num_cf_insts = 2;
1793 break;
1794 case CHIP_RV610:
1795 case CHIP_RV620:
1796 case CHIP_RS780:
1797 case CHIP_RS880:
1798 rdev->config.r600.max_pipes = 1;
1799 rdev->config.r600.max_tile_pipes = 1;
1800 rdev->config.r600.max_simds = 2;
1801 rdev->config.r600.max_backends = 1;
1802 rdev->config.r600.max_gprs = 128;
1803 rdev->config.r600.max_threads = 192;
1804 rdev->config.r600.max_stack_entries = 128;
1805 rdev->config.r600.max_hw_contexts = 4;
1806 rdev->config.r600.max_gs_threads = 4;
1807 rdev->config.r600.sx_max_export_size = 128;
1808 rdev->config.r600.sx_max_export_pos_size = 16;
1809 rdev->config.r600.sx_max_export_smx_size = 128;
1810 rdev->config.r600.sq_num_cf_insts = 1;
1811 break;
1812 case CHIP_RV670:
1813 rdev->config.r600.max_pipes = 4;
1814 rdev->config.r600.max_tile_pipes = 4;
1815 rdev->config.r600.max_simds = 4;
1816 rdev->config.r600.max_backends = 4;
1817 rdev->config.r600.max_gprs = 192;
1818 rdev->config.r600.max_threads = 192;
1819 rdev->config.r600.max_stack_entries = 256;
1820 rdev->config.r600.max_hw_contexts = 8;
1821 rdev->config.r600.max_gs_threads = 16;
1822 rdev->config.r600.sx_max_export_size = 128;
1823 rdev->config.r600.sx_max_export_pos_size = 16;
1824 rdev->config.r600.sx_max_export_smx_size = 128;
1825 rdev->config.r600.sq_num_cf_insts = 2;
1826 break;
1827 default:
1828 break;
1829 }
1830
1831 /* Initialize HDP */
1832 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1833 WREG32((0x2c14 + j), 0x00000000);
1834 WREG32((0x2c18 + j), 0x00000000);
1835 WREG32((0x2c1c + j), 0x00000000);
1836 WREG32((0x2c20 + j), 0x00000000);
1837 WREG32((0x2c24 + j), 0x00000000);
1838 }
1839
1840 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1841
1842 /* Setup tiling */
1843 tiling_config = 0;
1844 ramcfg = RREG32(RAMCFG);
1845 switch (rdev->config.r600.max_tile_pipes) {
1846 case 1:
1847 tiling_config |= PIPE_TILING(0);
1848 break;
1849 case 2:
1850 tiling_config |= PIPE_TILING(1);
1851 break;
1852 case 4:
1853 tiling_config |= PIPE_TILING(2);
1854 break;
1855 case 8:
1856 tiling_config |= PIPE_TILING(3);
1857 break;
1858 default:
1859 break;
1860 }
1861 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1862 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1863 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1864 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1865
1866 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1867 if (tmp > 3) {
1868 tiling_config |= ROW_TILING(3);
1869 tiling_config |= SAMPLE_SPLIT(3);
1870 } else {
1871 tiling_config |= ROW_TILING(tmp);
1872 tiling_config |= SAMPLE_SPLIT(tmp);
1873 }
1874 tiling_config |= BANK_SWAPS(1);
1875
1876 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1877 tmp = R6XX_MAX_BACKENDS -
1878 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1879 if (tmp < rdev->config.r600.max_backends) {
1880 rdev->config.r600.max_backends = tmp;
1881 }
1882
1883 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1884 tmp = R6XX_MAX_PIPES -
1885 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1886 if (tmp < rdev->config.r600.max_pipes) {
1887 rdev->config.r600.max_pipes = tmp;
1888 }
1889 tmp = R6XX_MAX_SIMDS -
1890 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1891 if (tmp < rdev->config.r600.max_simds) {
1892 rdev->config.r600.max_simds = tmp;
1893 }
1894
1895 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1896 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1897 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1898 R6XX_MAX_BACKENDS, disabled_rb_mask);
1899 tiling_config |= tmp << 16;
1900 rdev->config.r600.backend_map = tmp;
1901
1902 rdev->config.r600.tile_config = tiling_config;
1903 WREG32(GB_TILING_CONFIG, tiling_config);
1904 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1905 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1906 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
1907
1908 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1909 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1910 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1911
1912 /* Setup some CP states */
1913 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1914 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1915
1916 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1917 SYNC_WALKER | SYNC_ALIGNER));
1918 /* Setup various GPU states */
1919 if (rdev->family == CHIP_RV670)
1920 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1921
1922 tmp = RREG32(SX_DEBUG_1);
1923 tmp |= SMX_EVENT_RELEASE;
1924 if ((rdev->family > CHIP_R600))
1925 tmp |= ENABLE_NEW_SMX_ADDRESS;
1926 WREG32(SX_DEBUG_1, tmp);
1927
1928 if (((rdev->family) == CHIP_R600) ||
1929 ((rdev->family) == CHIP_RV630) ||
1930 ((rdev->family) == CHIP_RV610) ||
1931 ((rdev->family) == CHIP_RV620) ||
1932 ((rdev->family) == CHIP_RS780) ||
1933 ((rdev->family) == CHIP_RS880)) {
1934 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1935 } else {
1936 WREG32(DB_DEBUG, 0);
1937 }
1938 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1939 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1940
1941 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1942 WREG32(VGT_NUM_INSTANCES, 0);
1943
1944 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1945 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1946
1947 tmp = RREG32(SQ_MS_FIFO_SIZES);
1948 if (((rdev->family) == CHIP_RV610) ||
1949 ((rdev->family) == CHIP_RV620) ||
1950 ((rdev->family) == CHIP_RS780) ||
1951 ((rdev->family) == CHIP_RS880)) {
1952 tmp = (CACHE_FIFO_SIZE(0xa) |
1953 FETCH_FIFO_HIWATER(0xa) |
1954 DONE_FIFO_HIWATER(0xe0) |
1955 ALU_UPDATE_FIFO_HIWATER(0x8));
1956 } else if (((rdev->family) == CHIP_R600) ||
1957 ((rdev->family) == CHIP_RV630)) {
1958 tmp &= ~DONE_FIFO_HIWATER(0xff);
1959 tmp |= DONE_FIFO_HIWATER(0x4);
1960 }
1961 WREG32(SQ_MS_FIFO_SIZES, tmp);
1962
1963 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1964 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1965 */
1966 sq_config = RREG32(SQ_CONFIG);
1967 sq_config &= ~(PS_PRIO(3) |
1968 VS_PRIO(3) |
1969 GS_PRIO(3) |
1970 ES_PRIO(3));
1971 sq_config |= (DX9_CONSTS |
1972 VC_ENABLE |
1973 PS_PRIO(0) |
1974 VS_PRIO(1) |
1975 GS_PRIO(2) |
1976 ES_PRIO(3));
1977
1978 if ((rdev->family) == CHIP_R600) {
1979 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1980 NUM_VS_GPRS(124) |
1981 NUM_CLAUSE_TEMP_GPRS(4));
1982 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1983 NUM_ES_GPRS(0));
1984 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1985 NUM_VS_THREADS(48) |
1986 NUM_GS_THREADS(4) |
1987 NUM_ES_THREADS(4));
1988 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1989 NUM_VS_STACK_ENTRIES(128));
1990 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1991 NUM_ES_STACK_ENTRIES(0));
1992 } else if (((rdev->family) == CHIP_RV610) ||
1993 ((rdev->family) == CHIP_RV620) ||
1994 ((rdev->family) == CHIP_RS780) ||
1995 ((rdev->family) == CHIP_RS880)) {
1996 /* no vertex cache */
1997 sq_config &= ~VC_ENABLE;
1998
1999 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2000 NUM_VS_GPRS(44) |
2001 NUM_CLAUSE_TEMP_GPRS(2));
2002 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2003 NUM_ES_GPRS(17));
2004 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2005 NUM_VS_THREADS(78) |
2006 NUM_GS_THREADS(4) |
2007 NUM_ES_THREADS(31));
2008 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2009 NUM_VS_STACK_ENTRIES(40));
2010 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2011 NUM_ES_STACK_ENTRIES(16));
2012 } else if (((rdev->family) == CHIP_RV630) ||
2013 ((rdev->family) == CHIP_RV635)) {
2014 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2015 NUM_VS_GPRS(44) |
2016 NUM_CLAUSE_TEMP_GPRS(2));
2017 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2018 NUM_ES_GPRS(18));
2019 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2020 NUM_VS_THREADS(78) |
2021 NUM_GS_THREADS(4) |
2022 NUM_ES_THREADS(31));
2023 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2024 NUM_VS_STACK_ENTRIES(40));
2025 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2026 NUM_ES_STACK_ENTRIES(16));
2027 } else if ((rdev->family) == CHIP_RV670) {
2028 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2029 NUM_VS_GPRS(44) |
2030 NUM_CLAUSE_TEMP_GPRS(2));
2031 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2032 NUM_ES_GPRS(17));
2033 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2034 NUM_VS_THREADS(78) |
2035 NUM_GS_THREADS(4) |
2036 NUM_ES_THREADS(31));
2037 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2038 NUM_VS_STACK_ENTRIES(64));
2039 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2040 NUM_ES_STACK_ENTRIES(64));
2041 }
2042
2043 WREG32(SQ_CONFIG, sq_config);
2044 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2045 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2046 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2047 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2048 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2049
2050 if (((rdev->family) == CHIP_RV610) ||
2051 ((rdev->family) == CHIP_RV620) ||
2052 ((rdev->family) == CHIP_RS780) ||
2053 ((rdev->family) == CHIP_RS880)) {
2054 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2055 } else {
2056 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2057 }
2058
2059 /* More default values. 2D/3D driver should adjust as needed */
2060 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2061 S1_X(0x4) | S1_Y(0xc)));
2062 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2063 S1_X(0x2) | S1_Y(0x2) |
2064 S2_X(0xa) | S2_Y(0x6) |
2065 S3_X(0x6) | S3_Y(0xa)));
2066 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2067 S1_X(0x4) | S1_Y(0xc) |
2068 S2_X(0x1) | S2_Y(0x6) |
2069 S3_X(0xa) | S3_Y(0xe)));
2070 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2071 S5_X(0x0) | S5_Y(0x0) |
2072 S6_X(0xb) | S6_Y(0x4) |
2073 S7_X(0x7) | S7_Y(0x8)));
2074
2075 WREG32(VGT_STRMOUT_EN, 0);
2076 tmp = rdev->config.r600.max_pipes * 16;
2077 switch (rdev->family) {
2078 case CHIP_RV610:
2079 case CHIP_RV620:
2080 case CHIP_RS780:
2081 case CHIP_RS880:
2082 tmp += 32;
2083 break;
2084 case CHIP_RV670:
2085 tmp += 128;
2086 break;
2087 default:
2088 break;
2089 }
2090 if (tmp > 256) {
2091 tmp = 256;
2092 }
2093 WREG32(VGT_ES_PER_GS, 128);
2094 WREG32(VGT_GS_PER_ES, tmp);
2095 WREG32(VGT_GS_PER_VS, 2);
2096 WREG32(VGT_GS_VERTEX_REUSE, 16);
2097
2098 /* more default values. 2D/3D driver should adjust as needed */
2099 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2100 WREG32(VGT_STRMOUT_EN, 0);
2101 WREG32(SX_MISC, 0);
2102 WREG32(PA_SC_MODE_CNTL, 0);
2103 WREG32(PA_SC_AA_CONFIG, 0);
2104 WREG32(PA_SC_LINE_STIPPLE, 0);
2105 WREG32(SPI_INPUT_Z, 0);
2106 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2107 WREG32(CB_COLOR7_FRAG, 0);
2108
2109 /* Clear render buffer base addresses */
2110 WREG32(CB_COLOR0_BASE, 0);
2111 WREG32(CB_COLOR1_BASE, 0);
2112 WREG32(CB_COLOR2_BASE, 0);
2113 WREG32(CB_COLOR3_BASE, 0);
2114 WREG32(CB_COLOR4_BASE, 0);
2115 WREG32(CB_COLOR5_BASE, 0);
2116 WREG32(CB_COLOR6_BASE, 0);
2117 WREG32(CB_COLOR7_BASE, 0);
2118 WREG32(CB_COLOR7_FRAG, 0);
2119
2120 switch (rdev->family) {
2121 case CHIP_RV610:
2122 case CHIP_RV620:
2123 case CHIP_RS780:
2124 case CHIP_RS880:
2125 tmp = TC_L2_SIZE(8);
2126 break;
2127 case CHIP_RV630:
2128 case CHIP_RV635:
2129 tmp = TC_L2_SIZE(4);
2130 break;
2131 case CHIP_R600:
2132 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2133 break;
2134 default:
2135 tmp = TC_L2_SIZE(0);
2136 break;
2137 }
2138 WREG32(TC_CNTL, tmp);
2139
2140 tmp = RREG32(HDP_HOST_PATH_CNTL);
2141 WREG32(HDP_HOST_PATH_CNTL, tmp);
2142
2143 tmp = RREG32(ARB_POP);
2144 tmp |= ENABLE_TC128;
2145 WREG32(ARB_POP, tmp);
2146
2147 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2148 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2149 NUM_CLIP_SEQ(3)));
2150 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2151 WREG32(VC_ENHANCE, 0);
2152 }
2153
2154
2155 /*
2156 * Indirect registers accessor
2157 */
2158 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2159 {
2160 unsigned long flags;
2161 u32 r;
2162
2163 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2164 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2165 (void)RREG32(PCIE_PORT_INDEX);
2166 r = RREG32(PCIE_PORT_DATA);
2167 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2168 return r;
2169 }
2170
2171 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2172 {
2173 unsigned long flags;
2174
2175 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2176 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2177 (void)RREG32(PCIE_PORT_INDEX);
2178 WREG32(PCIE_PORT_DATA, (v));
2179 (void)RREG32(PCIE_PORT_DATA);
2180 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2181 }
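/* Illustrative note (not part of the original file): the two accessors
 * above implement the classic index/data pattern for indirect register
 * banks: write the register index to PCIE_PORT_INDEX, then move the
 * value through PCIE_PORT_DATA. The discarded RREG32() after each write
 * reads the register back to flush the posted write, guaranteeing the
 * index has landed before the data access, and the spinlock keeps the
 * two-step sequence atomic against other CPUs. A hedged usage sketch,
 * with 0x10 standing in for a hypothetical indirect offset:
 *
 *	u32 val = r600_pciep_rreg(rdev, 0x10);
 *	r600_pciep_wreg(rdev, 0x10, val | 0x1);
 */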
2182
2183 /*
2184 * CP & Ring
2185 */
2186 void r600_cp_stop(struct radeon_device *rdev)
2187 {
2188 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2189 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
2190 WREG32(SCRATCH_UMSK, 0);
2191 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2192 }
2193
2194 int r600_init_microcode(struct radeon_device *rdev)
2195 {
2196 const char *chip_name;
2197 const char *rlc_chip_name;
2198 const char *smc_chip_name = "RV770";
2199 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2200 char fw_name[30];
2201 int err;
2202
2203 DRM_DEBUG("\n");
2204
2205 switch (rdev->family) {
2206 case CHIP_R600:
2207 chip_name = "R600";
2208 rlc_chip_name = "R600";
2209 break;
2210 case CHIP_RV610:
2211 chip_name = "RV610";
2212 rlc_chip_name = "R600";
2213 break;
2214 case CHIP_RV630:
2215 chip_name = "RV630";
2216 rlc_chip_name = "R600";
2217 break;
2218 case CHIP_RV620:
2219 chip_name = "RV620";
2220 rlc_chip_name = "R600";
2221 break;
2222 case CHIP_RV635:
2223 chip_name = "RV635";
2224 rlc_chip_name = "R600";
2225 break;
2226 case CHIP_RV670:
2227 chip_name = "RV670";
2228 rlc_chip_name = "R600";
2229 break;
2230 case CHIP_RS780:
2231 case CHIP_RS880:
2232 chip_name = "RS780";
2233 rlc_chip_name = "R600";
2234 break;
2235 case CHIP_RV770:
2236 chip_name = "RV770";
2237 rlc_chip_name = "R700";
2238 smc_chip_name = "RV770";
2239 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2240 break;
2241 case CHIP_RV730:
2242 chip_name = "RV730";
2243 rlc_chip_name = "R700";
2244 smc_chip_name = "RV730";
2245 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2246 break;
2247 case CHIP_RV710:
2248 chip_name = "RV710";
2249 rlc_chip_name = "R700";
2250 smc_chip_name = "RV710";
2251 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2252 break;
2253 case CHIP_RV740:
2254 chip_name = "RV730";
2255 rlc_chip_name = "R700";
2256 smc_chip_name = "RV740";
2257 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2258 break;
2259 case CHIP_CEDAR:
2260 chip_name = "CEDAR";
2261 rlc_chip_name = "CEDAR";
2262 smc_chip_name = "CEDAR";
2263 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2264 break;
2265 case CHIP_REDWOOD:
2266 chip_name = "REDWOOD";
2267 rlc_chip_name = "REDWOOD";
2268 smc_chip_name = "REDWOOD";
2269 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2270 break;
2271 case CHIP_JUNIPER:
2272 chip_name = "JUNIPER";
2273 rlc_chip_name = "JUNIPER";
2274 smc_chip_name = "JUNIPER";
2275 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2276 break;
2277 case CHIP_CYPRESS:
2278 case CHIP_HEMLOCK:
2279 chip_name = "CYPRESS";
2280 rlc_chip_name = "CYPRESS";
2281 smc_chip_name = "CYPRESS";
2282 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2283 break;
2284 case CHIP_PALM:
2285 chip_name = "PALM";
2286 rlc_chip_name = "SUMO";
2287 break;
2288 case CHIP_SUMO:
2289 chip_name = "SUMO";
2290 rlc_chip_name = "SUMO";
2291 break;
2292 case CHIP_SUMO2:
2293 chip_name = "SUMO2";
2294 rlc_chip_name = "SUMO";
2295 break;
2296 default: BUG();
2297 }
2298
2299 if (rdev->family >= CHIP_CEDAR) {
2300 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2301 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2302 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2303 } else if (rdev->family >= CHIP_RV770) {
2304 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2305 me_req_size = R700_PM4_UCODE_SIZE * 4;
2306 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2307 } else {
2308 pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2309 me_req_size = R600_PM4_UCODE_SIZE * 12;
2310 rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2311 }
2312
2313 DRM_INFO("Loading %s Microcode\n", chip_name);
2314
2315 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2316 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2317 if (err)
2318 goto out;
2319 if (rdev->pfp_fw->size != pfp_req_size) {
2320 printk(KERN_ERR
2321 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2322 rdev->pfp_fw->size, fw_name);
2323 err = -EINVAL;
2324 goto out;
2325 }
2326
2327 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2328 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2329 if (err)
2330 goto out;
2331 if (rdev->me_fw->size != me_req_size) {
2332 printk(KERN_ERR
2333 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2334 rdev->me_fw->size, fw_name);
2335 err = -EINVAL;
2336 }
2337
2338 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2339 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2340 if (err)
2341 goto out;
2342 if (rdev->rlc_fw->size != rlc_req_size) {
2343 printk(KERN_ERR
2344 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2345 rdev->rlc_fw->size, fw_name);
2346 err = -EINVAL;
2347 }
2348
2349 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2350 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2351 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2352 if (err) {
2353 printk(KERN_ERR
2354 "smc: error loading firmware \"%s\"\n",
2355 fw_name);
2356 release_firmware(rdev->smc_fw);
2357 rdev->smc_fw = NULL;
2358 } else if (rdev->smc_fw->size != smc_req_size) {
2359 printk(KERN_ERR
2360 "smc: Bogus length %zu in firmware \"%s\"\n",
2361 rdev->smc_fw->size, fw_name);
2362 err = -EINVAL;
2363 }
2364 }
2365
2366 out:
2367 if (err) {
2368 if (err != -EINVAL)
2369 printk(KERN_ERR
2370 "r600_cp: Failed to load firmware \"%s\"\n",
2371 fw_name);
2372 release_firmware(rdev->pfp_fw);
2373 rdev->pfp_fw = NULL;
2374 release_firmware(rdev->me_fw);
2375 rdev->me_fw = NULL;
2376 release_firmware(rdev->rlc_fw);
2377 rdev->rlc_fw = NULL;
2378 release_firmware(rdev->smc_fw);
2379 rdev->smc_fw = NULL;
2380 }
2381 return err;
2382 }
2383
2384 static int r600_cp_load_microcode(struct radeon_device *rdev)
2385 {
2386 const __be32 *fw_data;
2387 int i;
2388
2389 if (!rdev->me_fw || !rdev->pfp_fw)
2390 return -EINVAL;
2391
2392 r600_cp_stop(rdev);
2393
2394 WREG32(CP_RB_CNTL,
2395 #ifdef __BIG_ENDIAN
2396 BUF_SWAP_32BIT |
2397 #endif
2398 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2399
2400 /* Reset cp */
2401 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2402 RREG32(GRBM_SOFT_RESET);
2403 mdelay(15);
2404 WREG32(GRBM_SOFT_RESET, 0);
2405
2406 WREG32(CP_ME_RAM_WADDR, 0);
2407
2408 fw_data = (const __be32 *)rdev->me_fw->data;
2409 WREG32(CP_ME_RAM_WADDR, 0);
2410 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2411 WREG32(CP_ME_RAM_DATA,
2412 be32_to_cpup(fw_data++));
2413
2414 fw_data = (const __be32 *)rdev->pfp_fw->data;
2415 WREG32(CP_PFP_UCODE_ADDR, 0);
2416 for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2417 WREG32(CP_PFP_UCODE_DATA,
2418 be32_to_cpup(fw_data++));
2419
2420 WREG32(CP_PFP_UCODE_ADDR, 0);
2421 WREG32(CP_ME_RAM_WADDR, 0);
2422 WREG32(CP_ME_RAM_RADDR, 0);
2423 return 0;
2424 }
2425
2426 int r600_cp_start(struct radeon_device *rdev)
2427 {
2428 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2429 int r;
2430 uint32_t cp_me;
2431
2432 r = radeon_ring_lock(rdev, ring, 7);
2433 if (r) {
2434 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2435 return r;
2436 }
2437 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2438 radeon_ring_write(ring, 0x1);
2439 if (rdev->family >= CHIP_RV770) {
2440 radeon_ring_write(ring, 0x0);
2441 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2442 } else {
2443 radeon_ring_write(ring, 0x3);
2444 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2445 }
2446 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2447 radeon_ring_write(ring, 0);
2448 radeon_ring_write(ring, 0);
2449 radeon_ring_unlock_commit(rdev, ring);
2450
2451 cp_me = 0xff;
2452 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2453 return 0;
2454 }
2455
2456 int r600_cp_resume(struct radeon_device *rdev)
2457 {
2458 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2459 u32 tmp;
2460 u32 rb_bufsz;
2461 int r;
2462
2463 /* Reset cp */
2464 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2465 RREG32(GRBM_SOFT_RESET);
2466 mdelay(15);
2467 WREG32(GRBM_SOFT_RESET, 0);
2468
2469 /* Set ring buffer size */
2470 rb_bufsz = order_base_2(ring->ring_size / 8);
2471 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2472 #ifdef __BIG_ENDIAN
2473 tmp |= BUF_SWAP_32BIT;
2474 #endif
2475 WREG32(CP_RB_CNTL, tmp);
2476 WREG32(CP_SEM_WAIT_TIMER, 0x0);
2477
2478 /* Set the write pointer delay */
2479 WREG32(CP_RB_WPTR_DELAY, 0);
2480
2481 /* Initialize the ring buffer's read and write pointers */
2482 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2483 WREG32(CP_RB_RPTR_WR, 0);
2484 ring->wptr = 0;
2485 WREG32(CP_RB_WPTR, ring->wptr);
2486
2487 /* set the wb address whether it's enabled or not */
2488 WREG32(CP_RB_RPTR_ADDR,
2489 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2490 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2491 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2492
2493 if (rdev->wb.enabled)
2494 WREG32(SCRATCH_UMSK, 0xff);
2495 else {
2496 tmp |= RB_NO_UPDATE;
2497 WREG32(SCRATCH_UMSK, 0);
2498 }
2499
2500 mdelay(1);
2501 WREG32(CP_RB_CNTL, tmp);
2502
2503 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2504 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2505
2506 ring->rptr = RREG32(CP_RB_RPTR);
2507
2508 r600_cp_start(rdev);
2509 ring->ready = true;
2510 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2511 if (r) {
2512 ring->ready = false;
2513 return r;
2514 }
2515 return 0;
2516 }
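/* Illustrative sketch (not part of the original file): the producer-side
 * arithmetic the ring code above relies on. Assuming rptr and wptr are
 * dword indices and the dword count is a power of two (guaranteed by
 * r600_ring_init() below), free space wraps naturally under the mask;
 * rptr == wptr means empty, so a zero result maps to "whole ring free".
 */
static u32 example_ring_free_dw(u32 rptr, u32 wptr, u32 ring_size_bytes)
{
	u32 num_dw = ring_size_bytes / 4;
	u32 free_dw = (rptr + num_dw - wptr) & (num_dw - 1);

	return free_dw ? free_dw : num_dw;
}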
2517
2518 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2519 {
2520 u32 rb_bufsz;
2521 int r;
2522
2523 /* Align ring size */
2524 rb_bufsz = order_base_2(ring_size / 8);
2525 ring_size = (1 << (rb_bufsz + 1)) * 4;
2526 ring->ring_size = ring_size;
2527 ring->align_mask = 16 - 1;
2528
2529 if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2530 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2531 if (r) {
2532 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2533 ring->rptr_save_reg = 0;
2534 }
2535 }
2536 }
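/* Illustrative worked example (not part of the original file): the
 * alignment above rounds the requested byte size up to a power of two,
 * since (1 << (rb_bufsz + 1)) * 4 == 8 << order_base_2(ring_size / 8).
 * Requesting 1024 * 1024 (the GFX ring default) is already a power of
 * two and comes back unchanged; requesting 40 KiB gives rb_bufsz =
 * order_base_2(40960 / 8) = 13 and a final ring_size of 64 KiB.
 */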
2537
2538 void r600_cp_fini(struct radeon_device *rdev)
2539 {
2540 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2541 r600_cp_stop(rdev);
2542 radeon_ring_fini(rdev, ring);
2543 radeon_scratch_free(rdev, ring->rptr_save_reg);
2544 }
2545
2546 /*
2547 * GPU scratch registers helpers function.
2548 */
2549 void r600_scratch_init(struct radeon_device *rdev)
2550 {
2551 int i;
2552
2553 rdev->scratch.num_reg = 7;
2554 rdev->scratch.reg_base = SCRATCH_REG0;
2555 for (i = 0; i < rdev->scratch.num_reg; i++) {
2556 rdev->scratch.free[i] = true;
2557 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2558 }
2559 }
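/* Illustrative sketch (not part of the original file): the matching
 * allocator lives elsewhere in the driver, but given the free[]/reg[]
 * tables initialized above it amounts to a linear scan over the seven
 * scratch registers:
 */
static int example_scratch_get(struct radeon_device *rdev, u32 *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;	/* every scratch register is in use */
}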
2560
2561 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2562 {
2563 uint32_t scratch;
2564 uint32_t tmp = 0;
2565 unsigned i;
2566 int r;
2567
2568 r = radeon_scratch_get(rdev, &scratch);
2569 if (r) {
2570 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2571 return r;
2572 }
2573 WREG32(scratch, 0xCAFEDEAD);
2574 r = radeon_ring_lock(rdev, ring, 3);
2575 if (r) {
2576 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2577 radeon_scratch_free(rdev, scratch);
2578 return r;
2579 }
2580 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2581 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2582 radeon_ring_write(ring, 0xDEADBEEF);
2583 radeon_ring_unlock_commit(rdev, ring);
2584 for (i = 0; i < rdev->usec_timeout; i++) {
2585 tmp = RREG32(scratch);
2586 if (tmp == 0xDEADBEEF)
2587 break;
2588 DRM_UDELAY(1);
2589 }
2590 if (i < rdev->usec_timeout) {
2591 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2592 } else {
2593 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2594 ring->idx, scratch, tmp);
2595 r = -EINVAL;
2596 }
2597 radeon_scratch_free(rdev, scratch);
2598 return r;
2599 }
2600
2601 /*
2602 * CP fences/semaphores
2603 */
2604
2605 void r600_fence_ring_emit(struct radeon_device *rdev,
2606 struct radeon_fence *fence)
2607 {
2608 struct radeon_ring *ring = &rdev->ring[fence->ring];
2609
2610 if (rdev->wb.use_event) {
2611 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2612 /* flush read cache over gart */
2613 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2614 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2615 PACKET3_VC_ACTION_ENA |
2616 PACKET3_SH_ACTION_ENA);
2617 radeon_ring_write(ring, 0xFFFFFFFF);
2618 radeon_ring_write(ring, 0);
2619 radeon_ring_write(ring, 10); /* poll interval */
2620 /* EVENT_WRITE_EOP - flush caches, send int */
2621 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2622 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2623 radeon_ring_write(ring, addr & 0xffffffff);
2624 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2625 radeon_ring_write(ring, fence->seq);
2626 radeon_ring_write(ring, 0);
2627 } else {
2628 /* flush read cache over gart */
2629 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2630 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2631 PACKET3_VC_ACTION_ENA |
2632 PACKET3_SH_ACTION_ENA);
2633 radeon_ring_write(ring, 0xFFFFFFFF);
2634 radeon_ring_write(ring, 0);
2635 radeon_ring_write(ring, 10); /* poll interval */
2636 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2637 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2638 /* wait for 3D idle clean */
2639 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2640 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2641 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2642 /* Emit fence sequence & fire IRQ */
2643 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2644 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2645 radeon_ring_write(ring, fence->seq);
2646 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2647 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2648 radeon_ring_write(ring, RB_INT_STAT);
2649 }
2650 }
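/* Illustrative sketch (not part of the original file): on the CPU side
 * the driver only needs to read back what the packets above wrote.
 * Assuming fence_cpu_addr is the kernel mapping of the slot targeted by
 * DATA_SEL(1) (or the scratch register shadow in the non-event path), a
 * raw signal check is a 32-bit compare; the real driver layers 64-bit
 * sequence tracking on top of this to survive wraparound.
 */
static bool example_fence_signaled(const volatile __le32 *fence_cpu_addr,
				   u32 seq)
{
	return le32_to_cpu(*fence_cpu_addr) >= seq;
}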
2651
2652 void r600_semaphore_ring_emit(struct radeon_device *rdev,
2653 struct radeon_ring *ring,
2654 struct radeon_semaphore *semaphore,
2655 bool emit_wait)
2656 {
2657 uint64_t addr = semaphore->gpu_addr;
2658 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2659
2660 if (rdev->family < CHIP_CAYMAN)
2661 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2662
2663 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2664 radeon_ring_write(ring, addr & 0xffffffff);
2665 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2666 }
2667
2668 /**
2669 * r600_copy_cpdma - copy pages using the CP DMA engine
2670 *
2671 * @rdev: radeon_device pointer
2672 * @src_offset: src GPU address
2673 * @dst_offset: dst GPU address
2674 * @num_gpu_pages: number of GPU pages to xfer
2675 * @fence: radeon fence object
2676 *
2677 * Copy GPU pages using the CP DMA engine (r6xx+).
2678 * Used by the radeon ttm implementation to move pages if
2679 * registered as the asic copy callback.
2680 */
2681 int r600_copy_cpdma(struct radeon_device *rdev,
2682 uint64_t src_offset, uint64_t dst_offset,
2683 unsigned num_gpu_pages,
2684 struct radeon_fence **fence)
2685 {
2686 struct radeon_semaphore *sem = NULL;
2687 int ring_index = rdev->asic->copy.blit_ring_index;
2688 struct radeon_ring *ring = &rdev->ring[ring_index];
2689 u32 size_in_bytes, cur_size_in_bytes, tmp;
2690 int i, num_loops;
2691 int r = 0;
2692
2693 r = radeon_semaphore_create(rdev, &sem);
2694 if (r) {
2695 DRM_ERROR("radeon: moving bo (%d).\n", r);
2696 return r;
2697 }
2698
2699 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2700 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2701 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2702 if (r) {
2703 DRM_ERROR("radeon: moving bo (%d).\n", r);
2704 radeon_semaphore_free(rdev, &sem, NULL);
2705 return r;
2706 }
2707
2708 if (radeon_fence_need_sync(*fence, ring->idx)) {
2709 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
2710 ring->idx);
2711 radeon_fence_note_sync(*fence, ring->idx);
2712 } else {
2713 radeon_semaphore_free(rdev, &sem, NULL);
2714 }
2715
2716 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2717 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2718 radeon_ring_write(ring, WAIT_3D_IDLE_bit);
2719 for (i = 0; i < num_loops; i++) {
2720 cur_size_in_bytes = size_in_bytes;
2721 if (cur_size_in_bytes > 0x1fffff)
2722 cur_size_in_bytes = 0x1fffff;
2723 size_in_bytes -= cur_size_in_bytes;
2724 tmp = upper_32_bits(src_offset) & 0xff;
2725 if (size_in_bytes == 0)
2726 tmp |= PACKET3_CP_DMA_CP_SYNC;
2727 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2728 radeon_ring_write(ring, src_offset & 0xffffffff);
2729 radeon_ring_write(ring, tmp);
2730 radeon_ring_write(ring, dst_offset & 0xffffffff);
2731 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2732 radeon_ring_write(ring, cur_size_in_bytes);
2733 src_offset += cur_size_in_bytes;
2734 dst_offset += cur_size_in_bytes;
2735 }
2736 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2737 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2738 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
2739
2740 r = radeon_fence_emit(rdev, fence, ring->idx);
2741 if (r) {
2742 radeon_ring_unlock_undo(rdev, ring);
2743 return r;
2744 }
2745
2746 radeon_ring_unlock_commit(rdev, ring);
2747 radeon_semaphore_free(rdev, &sem, *fence);
2748
2749 return r;
2750 }
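/* Illustrative arithmetic (not part of the original file): each CP_DMA
 * packet above moves at most 0x1fffff bytes (2 MiB - 1). Assuming 4 KiB
 * GPU pages, copying 1024 pages is 1024 << 12 = 4 MiB, so num_loops =
 * DIV_ROUND_UP(4194304, 0x1fffff) = 3: two full 2097151-byte chunks
 * plus a 2-byte tail. The ring is then locked for 3 * 6 + 24 = 42
 * dwords, covering the copy packets plus the surrounding wait and
 * fence packets.
 */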
2751
2752 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2753 uint32_t tiling_flags, uint32_t pitch,
2754 uint32_t offset, uint32_t obj_size)
2755 {
2756 /* FIXME: implement */
2757 return 0;
2758 }
2759
2760 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2761 {
2762 /* FIXME: implement */
2763 }
2764
2765 static int r600_startup(struct radeon_device *rdev)
2766 {
2767 struct radeon_ring *ring;
2768 int r;
2769
2770 /* enable pcie gen2 link */
2771 r600_pcie_gen2_enable(rdev);
2772
2773 /* scratch needs to be initialized before MC */
2774 r = r600_vram_scratch_init(rdev);
2775 if (r)
2776 return r;
2777
2778 r600_mc_program(rdev);
2779
2780 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2781 r = r600_init_microcode(rdev);
2782 if (r) {
2783 DRM_ERROR("Failed to load firmware!\n");
2784 return r;
2785 }
2786 }
2787
2788 if (rdev->flags & RADEON_IS_AGP) {
2789 r600_agp_enable(rdev);
2790 } else {
2791 r = r600_pcie_gart_enable(rdev);
2792 if (r)
2793 return r;
2794 }
2795 r600_gpu_init(rdev);
2796
2797 /* allocate wb buffer */
2798 r = radeon_wb_init(rdev);
2799 if (r)
2800 return r;
2801
2802 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2803 if (r) {
2804 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2805 return r;
2806 }
2807
2808 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
2809 if (r) {
2810 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2811 return r;
2812 }
2813
2814 /* Enable IRQ */
2815 if (!rdev->irq.installed) {
2816 r = radeon_irq_kms_init(rdev);
2817 if (r)
2818 return r;
2819 }
2820
2821 r = r600_irq_init(rdev);
2822 if (r) {
2823 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2824 radeon_irq_kms_fini(rdev);
2825 return r;
2826 }
2827 r600_irq_set(rdev);
2828
2829 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2830 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2831 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2832 RADEON_CP_PACKET2);
2833 if (r)
2834 return r;
2835
2836 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2837 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2838 DMA_RB_RPTR, DMA_RB_WPTR,
2839 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2840 if (r)
2841 return r;
2842
2843 r = r600_cp_load_microcode(rdev);
2844 if (r)
2845 return r;
2846 r = r600_cp_resume(rdev);
2847 if (r)
2848 return r;
2849
2850 r = r600_dma_resume(rdev);
2851 if (r)
2852 return r;
2853
2854 r = radeon_ib_pool_init(rdev);
2855 if (r) {
2856 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2857 return r;
2858 }
2859
2860 r = r600_audio_init(rdev);
2861 if (r) {
2862 DRM_ERROR("radeon: audio init failed\n");
2863 return r;
2864 }
2865
2866 return 0;
2867 }
2868
2869 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2870 {
2871 uint32_t temp;
2872
2873 temp = RREG32(CONFIG_CNTL);
2874 if (state == false) {
2875 temp &= ~(1<<0);
2876 temp |= (1<<1);
2877 } else {
2878 temp &= ~(1<<1);
2879 }
2880 WREG32(CONFIG_CNTL, temp);
2881 }
2882
2883 int r600_resume(struct radeon_device *rdev)
2884 {
2885 int r;
2886
2887 /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
2888 * posting will perform the necessary tasks to bring the GPU back
2889 * into good shape.
2890 */
2891 /* post card */
2892 atom_asic_init(rdev->mode_info.atom_context);
2893
2894 rdev->accel_working = true;
2895 r = r600_startup(rdev);
2896 if (r) {
2897 DRM_ERROR("r600 startup failed on resume\n");
2898 rdev->accel_working = false;
2899 return r;
2900 }
2901
2902 return r;
2903 }
2904
2905 int r600_suspend(struct radeon_device *rdev)
2906 {
2907 r600_audio_fini(rdev);
2908 r600_cp_stop(rdev);
2909 r600_dma_stop(rdev);
2910 r600_irq_suspend(rdev);
2911 radeon_wb_disable(rdev);
2912 r600_pcie_gart_disable(rdev);
2913
2914 return 0;
2915 }
2916
2917 /* The plan is to move initialization into this function and to
2918 * use helper functions so that radeon_device_init does little
2919 * more than calling ASIC-specific functions. This should also
2920 * allow us to remove a bunch of callback functions such as
2921 * vram_info.
2922 */
2923 int r600_init(struct radeon_device *rdev)
2924 {
2925 int r;
2926
2927 if (r600_debugfs_mc_info_init(rdev)) {
2928 DRM_ERROR("Failed to register debugfs file for mc!\n");
2929 }
2930 /* Read BIOS */
2931 if (!radeon_get_bios(rdev)) {
2932 if (ASIC_IS_AVIVO(rdev))
2933 return -EINVAL;
2934 }
2935 /* Must be an ATOMBIOS */
2936 if (!rdev->is_atom_bios) {
2937 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2938 return -EINVAL;
2939 }
2940 r = radeon_atombios_init(rdev);
2941 if (r)
2942 return r;
2943 /* Post card if necessary */
2944 if (!radeon_card_posted(rdev)) {
2945 if (!rdev->bios) {
2946 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2947 return -EINVAL;
2948 }
2949 DRM_INFO("GPU not posted. posting now...\n");
2950 atom_asic_init(rdev->mode_info.atom_context);
2951 }
2952 /* Initialize scratch registers */
2953 r600_scratch_init(rdev);
2954 /* Initialize surface registers */
2955 radeon_surface_init(rdev);
2956 /* Initialize clocks */
2957 radeon_get_clock_info(rdev->ddev);
2958 /* Fence driver */
2959 r = radeon_fence_driver_init(rdev);
2960 if (r)
2961 return r;
2962 if (rdev->flags & RADEON_IS_AGP) {
2963 r = radeon_agp_init(rdev);
2964 if (r)
2965 radeon_agp_disable(rdev);
2966 }
2967 r = r600_mc_init(rdev);
2968 if (r)
2969 return r;
2970 /* Memory manager */
2971 r = radeon_bo_init(rdev);
2972 if (r)
2973 return r;
2974
2975 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2976 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2977
2978 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
2979 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
2980
2981 rdev->ih.ring_obj = NULL;
2982 r600_ih_ring_init(rdev, 64 * 1024);
2983
2984 r = r600_pcie_gart_init(rdev);
2985 if (r)
2986 return r;
2987
2988 rdev->accel_working = true;
2989 r = r600_startup(rdev);
2990 if (r) {
2991 dev_err(rdev->dev, "disabling GPU acceleration\n");
2992 r600_cp_fini(rdev);
2993 r600_dma_fini(rdev);
2994 r600_irq_fini(rdev);
2995 radeon_wb_fini(rdev);
2996 radeon_ib_pool_fini(rdev);
2997 radeon_irq_kms_fini(rdev);
2998 r600_pcie_gart_fini(rdev);
2999 rdev->accel_working = false;
3000 }
3001
3002 return 0;
3003 }
3004
3005 void r600_fini(struct radeon_device *rdev)
3006 {
3007 r600_audio_fini(rdev);
3008 r600_cp_fini(rdev);
3009 r600_dma_fini(rdev);
3010 r600_irq_fini(rdev);
3011 radeon_wb_fini(rdev);
3012 radeon_ib_pool_fini(rdev);
3013 radeon_irq_kms_fini(rdev);
3014 r600_pcie_gart_fini(rdev);
3015 r600_vram_scratch_fini(rdev);
3016 radeon_agp_fini(rdev);
3017 radeon_gem_fini(rdev);
3018 radeon_fence_driver_fini(rdev);
3019 radeon_bo_fini(rdev);
3020 radeon_atombios_fini(rdev);
3021 kfree(rdev->bios);
3022 rdev->bios = NULL;
3023 }
3024
3025
3026 /*
3027 * CS stuff
3028 */
3029 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3030 {
3031 struct radeon_ring *ring = &rdev->ring[ib->ring];
3032 u32 next_rptr;
3033
3034 if (ring->rptr_save_reg) {
3035 next_rptr = ring->wptr + 3 + 4;
3036 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3037 radeon_ring_write(ring, ((ring->rptr_save_reg -
3038 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
3039 radeon_ring_write(ring, next_rptr);
3040 } else if (rdev->wb.enabled) {
3041 next_rptr = ring->wptr + 5 + 4;
3042 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
3043 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3044 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
3045 radeon_ring_write(ring, next_rptr);
3046 radeon_ring_write(ring, 0);
3047 }
3048
3049 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3050 radeon_ring_write(ring,
3051 #ifdef __BIG_ENDIAN
3052 (2 << 0) |
3053 #endif
3054 (ib->gpu_addr & 0xFFFFFFFC));
3055 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3056 radeon_ring_write(ring, ib->length_dw);
3057 }
3058
3059 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3060 {
3061 struct radeon_ib ib;
3062 uint32_t scratch;
3063 uint32_t tmp = 0;
3064 unsigned i;
3065 int r;
3066
3067 r = radeon_scratch_get(rdev, &scratch);
3068 if (r) {
3069 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3070 return r;
3071 }
3072 WREG32(scratch, 0xCAFEDEAD);
3073 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3074 if (r) {
3075 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3076 goto free_scratch;
3077 }
3078 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3079 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3080 ib.ptr[2] = 0xDEADBEEF;
3081 ib.length_dw = 3;
3082 r = radeon_ib_schedule(rdev, &ib, NULL);
3083 if (r) {
3084 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3085 goto free_ib;
3086 }
3087 r = radeon_fence_wait(ib.fence, false);
3088 if (r) {
3089 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3090 goto free_ib;
3091 }
3092 for (i = 0; i < rdev->usec_timeout; i++) {
3093 tmp = RREG32(scratch);
3094 if (tmp == 0xDEADBEEF)
3095 break;
3096 DRM_UDELAY(1);
3097 }
3098 if (i < rdev->usec_timeout) {
3099 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3100 } else {
3101 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3102 scratch, tmp);
3103 r = -EINVAL;
3104 }
3105 free_ib:
3106 radeon_ib_free(rdev, &ib);
3107 free_scratch:
3108 radeon_scratch_free(rdev, scratch);
3109 return r;
3110 }
3111
3112 /*
3113 * Interrupts
3114 *
3115 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty much
3116 * the same as the CP ring buffer, but in reverse. Rather than the CPU
3117 * writing to the ring and the GPU consuming, the GPU writes to the ring
3118 * and host consumes. As the host irq handler processes interrupts, it
3119 * increments the rptr. When the rptr catches up with the wptr, all the
3120 * current interrupts have been processed.
3121 */
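
/* Illustrative sketch (not part of the original file): the consumer
 * loop described above, assuming each IH vector is 16 bytes (four
 * dwords) as on r6xx/r7xx and that wptr has already been fetched from
 * the writeback buffer or IH_RB_WPTR. Draining walks rptr toward wptr
 * and writes the new rptr back so the hardware knows the space is free.
 */
static void example_ih_drain(struct radeon_device *rdev, u32 wptr)
{
	u32 rptr = rdev->ih.rptr;

	while (rptr != wptr) {
		u32 ring_index = rptr / 4;
		u32 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		u32 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		/* ... dispatch on src_id / src_data here ... */
		(void)src_id;
		(void)src_data;

		rptr += 16;			/* one vector consumed */
		rptr &= rdev->ih.ptr_mask;	/* wrap at the ring end */
	}
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rptr);
}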
3122
3123 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3124 {
3125 u32 rb_bufsz;
3126
3127 /* Align ring size */
3128 rb_bufsz = order_base_2(ring_size / 4);
3129 ring_size = (1 << rb_bufsz) * 4;
3130 rdev->ih.ring_size = ring_size;
3131 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
3132 rdev->ih.rptr = 0;
3133 }
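/* Illustrative worked example (not part of the original file): for the
 * default 64 KiB IH ring allocated in r600_init(), rb_bufsz =
 * order_base_2(65536 / 4) = 14, ring_size = (1 << 14) * 4 = 65536, and
 * ptr_mask = 0xffff, so the byte offsets used by the consumer wrap
 * exactly at the end of the ring.
 */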
3134
3135 int r600_ih_ring_alloc(struct radeon_device *rdev)
3136 {
3137 int r;
3138
3139 /* Allocate ring buffer */
3140 if (rdev->ih.ring_obj == NULL) {
3141 r = radeon_bo_create(rdev, rdev->ih.ring_size,
3142 PAGE_SIZE, true,
3143 RADEON_GEM_DOMAIN_GTT,
3144 NULL, &rdev->ih.ring_obj);
3145 if (r) {
3146 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3147 return r;
3148 }
3149 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3150 if (unlikely(r != 0))
3151 return r;
3152 r = radeon_bo_pin(rdev->ih.ring_obj,
3153 RADEON_GEM_DOMAIN_GTT,
3154 &rdev->ih.gpu_addr);
3155 if (r) {
3156 radeon_bo_unreserve(rdev->ih.ring_obj);
3157 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3158 return r;
3159 }
3160 r = radeon_bo_kmap(rdev->ih.ring_obj,
3161 (void **)&rdev->ih.ring);
3162 radeon_bo_unreserve(rdev->ih.ring_obj);
3163 if (r) {
3164 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3165 return r;
3166 }
3167 }
3168 return 0;
3169 }
3170
3171 void r600_ih_ring_fini(struct radeon_device *rdev)
3172 {
3173 int r;
3174 if (rdev->ih.ring_obj) {
3175 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3176 if (likely(r == 0)) {
3177 radeon_bo_kunmap(rdev->ih.ring_obj);
3178 radeon_bo_unpin(rdev->ih.ring_obj);
3179 radeon_bo_unreserve(rdev->ih.ring_obj);
3180 }
3181 radeon_bo_unref(&rdev->ih.ring_obj);
3182 rdev->ih.ring = NULL;
3183 rdev->ih.ring_obj = NULL;
3184 }
3185 }
3186
3187 void r600_rlc_stop(struct radeon_device *rdev)
3188 {
3189
3190 if ((rdev->family >= CHIP_RV770) &&
3191 (rdev->family <= CHIP_RV740)) {
3192 /* r7xx asics need to soft reset RLC before halting */
3193 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3194 RREG32(SRBM_SOFT_RESET);
3195 mdelay(15);
3196 WREG32(SRBM_SOFT_RESET, 0);
3197 RREG32(SRBM_SOFT_RESET);
3198 }
3199
3200 WREG32(RLC_CNTL, 0);
3201 }
3202
3203 static void r600_rlc_start(struct radeon_device *rdev)
3204 {
3205 WREG32(RLC_CNTL, RLC_ENABLE);
3206 }
3207
3208 static int r600_rlc_resume(struct radeon_device *rdev)
3209 {
3210 u32 i;
3211 const __be32 *fw_data;
3212
3213 if (!rdev->rlc_fw)
3214 return -EINVAL;
3215
3216 r600_rlc_stop(rdev);
3217
3218 WREG32(RLC_HB_CNTL, 0);
3219
3220 WREG32(RLC_HB_BASE, 0);
3221 WREG32(RLC_HB_RPTR, 0);
3222 WREG32(RLC_HB_WPTR, 0);
3223 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3224 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3225 WREG32(RLC_MC_CNTL, 0);
3226 WREG32(RLC_UCODE_CNTL, 0);
3227
3228 fw_data = (const __be32 *)rdev->rlc_fw->data;
3229 if (rdev->family >= CHIP_RV770) {
3230 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3231 WREG32(RLC_UCODE_ADDR, i);
3232 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3233 }
3234 } else {
3235 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3236 WREG32(RLC_UCODE_ADDR, i);
3237 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3238 }
3239 }
3240 WREG32(RLC_UCODE_ADDR, 0);
3241
3242 r600_rlc_start(rdev);
3243
3244 return 0;
3245 }
3246
3247 static void r600_enable_interrupts(struct radeon_device *rdev)
3248 {
3249 u32 ih_cntl = RREG32(IH_CNTL);
3250 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3251
3252 ih_cntl |= ENABLE_INTR;
3253 ih_rb_cntl |= IH_RB_ENABLE;
3254 WREG32(IH_CNTL, ih_cntl);
3255 WREG32(IH_RB_CNTL, ih_rb_cntl);
3256 rdev->ih.enabled = true;
3257 }
3258
3259 void r600_disable_interrupts(struct radeon_device *rdev)
3260 {
3261 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3262 u32 ih_cntl = RREG32(IH_CNTL);
3263
3264 ih_rb_cntl &= ~IH_RB_ENABLE;
3265 ih_cntl &= ~ENABLE_INTR;
3266 WREG32(IH_RB_CNTL, ih_rb_cntl);
3267 WREG32(IH_CNTL, ih_cntl);
3268 /* set rptr, wptr to 0 */
3269 WREG32(IH_RB_RPTR, 0);
3270 WREG32(IH_RB_WPTR, 0);
3271 rdev->ih.enabled = false;
3272 rdev->ih.rptr = 0;
3273 }
3274
3275 static void r600_disable_interrupt_state(struct radeon_device *rdev)
3276 {
3277 u32 tmp;
3278
3279 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3280 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3281 WREG32(DMA_CNTL, tmp);
3282 WREG32(GRBM_INT_CNTL, 0);
3283 WREG32(DxMODE_INT_MASK, 0);
3284 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3285 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
3286 if (ASIC_IS_DCE3(rdev)) {
3287 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3288 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3289 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3290 WREG32(DC_HPD1_INT_CONTROL, tmp);
3291 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3292 WREG32(DC_HPD2_INT_CONTROL, tmp);
3293 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3294 WREG32(DC_HPD3_INT_CONTROL, tmp);
3295 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3296 WREG32(DC_HPD4_INT_CONTROL, tmp);
3297 if (ASIC_IS_DCE32(rdev)) {
3298 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3299 WREG32(DC_HPD5_INT_CONTROL, tmp);
3300 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3301 WREG32(DC_HPD6_INT_CONTROL, tmp);
3302 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3303 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3304 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3305 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3306 } else {
3307 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3308 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3309 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3310 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3311 }
3312 } else {
3313 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3314 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3315 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3316 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3317 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3318 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3319 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3320 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3321 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3322 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3323 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3324 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3325 }
3326 }
3327
3328 int r600_irq_init(struct radeon_device *rdev)
3329 {
3330 int ret = 0;
3331 int rb_bufsz;
3332 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3333
3334 /* allocate ring */
3335 ret = r600_ih_ring_alloc(rdev);
3336 if (ret)
3337 return ret;
3338
3339 /* disable irqs */
3340 r600_disable_interrupts(rdev);
3341
3342 /* init rlc */
3343 if (rdev->family >= CHIP_CEDAR)
3344 ret = evergreen_rlc_resume(rdev);
3345 else
3346 ret = r600_rlc_resume(rdev);
3347 if (ret) {
3348 r600_ih_ring_fini(rdev);
3349 return ret;
3350 }
3351
3352 /* setup interrupt control */
3353 /* set dummy read address to ring address */
3354 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3355 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3356 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3357 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3358 */
3359 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3360 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3361 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3362 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3363
3364 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3365 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
3366
3367 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3368 IH_WPTR_OVERFLOW_CLEAR |
3369 (rb_bufsz << 1));
3370
3371 if (rdev->wb.enabled)
3372 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3373
3374 /* set the writeback address whether it's enabled or not */
3375 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3376 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3377
3378 WREG32(IH_RB_CNTL, ih_rb_cntl);
3379
3380 /* set rptr, wptr to 0 */
3381 WREG32(IH_RB_RPTR, 0);
3382 WREG32(IH_RB_WPTR, 0);
3383
3384 /* Default settings for IH_CNTL (disabled at first) */
3385 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3386 /* RPTR_REARM only works if msi's are enabled */
3387 if (rdev->msi_enabled)
3388 ih_cntl |= RPTR_REARM;
3389 WREG32(IH_CNTL, ih_cntl);
3390
3391 /* force the active interrupt state to all disabled */
3392 if (rdev->family >= CHIP_CEDAR)
3393 evergreen_disable_interrupt_state(rdev);
3394 else
3395 r600_disable_interrupt_state(rdev);
3396
3397 /* at this point everything should be setup correctly to enable master */
3398 pci_set_master(rdev->pdev);
3399
3400 /* enable irqs */
3401 r600_enable_interrupts(rdev);
3402
3403 return ret;
3404 }
3405
3406 void r600_irq_suspend(struct radeon_device *rdev)
3407 {
3408 r600_irq_disable(rdev);
3409 r600_rlc_stop(rdev);
3410 }
3411
3412 void r600_irq_fini(struct radeon_device *rdev)
3413 {
3414 r600_irq_suspend(rdev);
3415 r600_ih_ring_fini(rdev);
3416 }
3417
3418 int r600_irq_set(struct radeon_device *rdev)
3419 {
3420 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3421 u32 mode_int = 0;
3422 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3423 u32 grbm_int_cntl = 0;
3424 u32 hdmi0, hdmi1;
3425 u32 d1grph = 0, d2grph = 0;
3426 u32 dma_cntl;
3427 u32 thermal_int = 0;
3428
3429 if (!rdev->irq.installed) {
3430 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3431 return -EINVAL;
3432 }
3433 /* don't enable anything if the ih is disabled */
3434 if (!rdev->ih.enabled) {
3435 r600_disable_interrupts(rdev);
3436 /* force the active interrupt state to all disabled */
3437 r600_disable_interrupt_state(rdev);
3438 return 0;
3439 }
3440
3441 if (ASIC_IS_DCE3(rdev)) {
3442 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3443 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3444 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3445 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3446 if (ASIC_IS_DCE32(rdev)) {
3447 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3448 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3449 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3450 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3451 } else {
3452 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3453 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3454 }
3455 } else {
3456 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3457 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3458 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3459 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3460 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3461 }
3462
3463 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3464
3465 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3466 thermal_int = RREG32(CG_THERMAL_INT) &
3467 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3468 } else if (rdev->family >= CHIP_RV770) {
3469 thermal_int = RREG32(RV770_CG_THERMAL_INT) &
3470 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3471 }
3472 if (rdev->irq.dpm_thermal) {
3473 DRM_DEBUG("dpm thermal\n");
3474 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
3475 }
3476
3477 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3478 DRM_DEBUG("r600_irq_set: sw int\n");
3479 cp_int_cntl |= RB_INT_ENABLE;
3480 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3481 }
3482
3483 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3484 DRM_DEBUG("r600_irq_set: sw int dma\n");
3485 dma_cntl |= TRAP_ENABLE;
3486 }
3487
3488 if (rdev->irq.crtc_vblank_int[0] ||
3489 atomic_read(&rdev->irq.pflip[0])) {
3490 DRM_DEBUG("r600_irq_set: vblank 0\n");
3491 mode_int |= D1MODE_VBLANK_INT_MASK;
3492 }
3493 if (rdev->irq.crtc_vblank_int[1] ||
3494 atomic_read(&rdev->irq.pflip[1])) {
3495 DRM_DEBUG("r600_irq_set: vblank 1\n");
3496 mode_int |= D2MODE_VBLANK_INT_MASK;
3497 }
3498 if (rdev->irq.hpd[0]) {
3499 DRM_DEBUG("r600_irq_set: hpd 1\n");
3500 hpd1 |= DC_HPDx_INT_EN;
3501 }
3502 if (rdev->irq.hpd[1]) {
3503 DRM_DEBUG("r600_irq_set: hpd 2\n");
3504 hpd2 |= DC_HPDx_INT_EN;
3505 }
3506 if (rdev->irq.hpd[2]) {
3507 DRM_DEBUG("r600_irq_set: hpd 3\n");
3508 hpd3 |= DC_HPDx_INT_EN;
3509 }
3510 if (rdev->irq.hpd[3]) {
3511 DRM_DEBUG("r600_irq_set: hpd 4\n");
3512 hpd4 |= DC_HPDx_INT_EN;
3513 }
3514 if (rdev->irq.hpd[4]) {
3515 DRM_DEBUG("r600_irq_set: hpd 5\n");
3516 hpd5 |= DC_HPDx_INT_EN;
3517 }
3518 if (rdev->irq.hpd[5]) {
3519 DRM_DEBUG("r600_irq_set: hpd 6\n");
3520 hpd6 |= DC_HPDx_INT_EN;
3521 }
3522 if (rdev->irq.afmt[0]) {
3523 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3524 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3525 }
3526 if (rdev->irq.afmt[1]) {
3527 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3528 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3529 }
3530
3531 WREG32(CP_INT_CNTL, cp_int_cntl);
3532 WREG32(DMA_CNTL, dma_cntl);
3533 WREG32(DxMODE_INT_MASK, mode_int);
3534 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3535 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3536 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3537 if (ASIC_IS_DCE3(rdev)) {
3538 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3539 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3540 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3541 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3542 if (ASIC_IS_DCE32(rdev)) {
3543 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3544 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3545 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3546 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3547 } else {
3548 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3549 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3550 }
3551 } else {
3552 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3553 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3554 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3555 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3556 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3557 }
3558 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3559 WREG32(CG_THERMAL_INT, thermal_int);
3560 } else if (rdev->family >= CHIP_RV770) {
3561 WREG32(RV770_CG_THERMAL_INT, thermal_int);
3562 }
3563
3564 return 0;
3565 }
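/*
 * Illustrative usage (a sketch, not part of the driver): callers flag the
 * sources they want in rdev->irq and then reprogram the hardware, e.g.
 *
 *	rdev->irq.crtc_vblank_int[0] = true;	(unmask CRTC0 vblank)
 *	r600_irq_set(rdev);
 *
 * In the real driver this is reached through the asic irq.set callback
 * rather than by calling r600_irq_set() directly.
 */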
3566
3567 static void r600_irq_ack(struct radeon_device *rdev)
3568 {
3569 u32 tmp;
3570
3571 if (ASIC_IS_DCE3(rdev)) {
3572 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3573 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3574 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3575 if (ASIC_IS_DCE32(rdev)) {
3576 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3577 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3578 } else {
3579 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3580 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3581 }
3582 } else {
3583 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3584 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3585 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3586 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3587 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3588 }
3589 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3590 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3591
3592 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3593 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3594 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3595 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3596 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3597 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3598 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3599 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3600 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3601 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3602 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3603 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3604 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3605 if (ASIC_IS_DCE3(rdev)) {
3606 tmp = RREG32(DC_HPD1_INT_CONTROL);
3607 tmp |= DC_HPDx_INT_ACK;
3608 WREG32(DC_HPD1_INT_CONTROL, tmp);
3609 } else {
3610 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3611 tmp |= DC_HPDx_INT_ACK;
3612 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3613 }
3614 }
3615 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3616 if (ASIC_IS_DCE3(rdev)) {
3617 tmp = RREG32(DC_HPD2_INT_CONTROL);
3618 tmp |= DC_HPDx_INT_ACK;
3619 WREG32(DC_HPD2_INT_CONTROL, tmp);
3620 } else {
3621 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3622 tmp |= DC_HPDx_INT_ACK;
3623 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3624 }
3625 }
3626 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3627 if (ASIC_IS_DCE3(rdev)) {
3628 tmp = RREG32(DC_HPD3_INT_CONTROL);
3629 tmp |= DC_HPDx_INT_ACK;
3630 WREG32(DC_HPD3_INT_CONTROL, tmp);
3631 } else {
3632 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3633 tmp |= DC_HPDx_INT_ACK;
3634 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3635 }
3636 }
3637 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3638 tmp = RREG32(DC_HPD4_INT_CONTROL);
3639 tmp |= DC_HPDx_INT_ACK;
3640 WREG32(DC_HPD4_INT_CONTROL, tmp);
3641 }
3642 if (ASIC_IS_DCE32(rdev)) {
3643 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3644 tmp = RREG32(DC_HPD5_INT_CONTROL);
3645 tmp |= DC_HPDx_INT_ACK;
3646 WREG32(DC_HPD5_INT_CONTROL, tmp);
3647 }
3648 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3649 tmp = RREG32(DC_HPD6_INT_CONTROL);
3650 tmp |= DC_HPDx_INT_ACK;
3651 WREG32(DC_HPD6_INT_CONTROL, tmp);
3652 }
3653 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3654 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3655 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3656 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3657 }
3658 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3659 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3660 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3661 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3662 }
3663 } else {
3664 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3665 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3666 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3667 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3668 }
3669 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3670 if (ASIC_IS_DCE3(rdev)) {
3671 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3672 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3673 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3674 } else {
3675 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3676 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3677 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3678 }
3679 }
3680 }
3681 }
3682
3683 void r600_irq_disable(struct radeon_device *rdev)
3684 {
3685 r600_disable_interrupts(rdev);
3686 /* Wait and acknowledge irq */
3687 mdelay(1);
3688 r600_irq_ack(rdev);
3689 r600_disable_interrupt_state(rdev);
3690 }
3691
3692 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3693 {
3694 u32 wptr, tmp;
3695
3696 if (rdev->wb.enabled)
3697 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3698 else
3699 wptr = RREG32(IH_RB_WPTR);
3700
3701 if (wptr & RB_OVERFLOW) {
3702 /* When a ring buffer overflow happens, start parsing interrupts
3703 * from the last not-overwritten vector (wptr + 16). Hopefully
3704 * this should allow us to catch up.
3705 */
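/*
 * Illustrative example: with a 64 KiB ring (ptr_mask 0xffff) and wptr
 * stuck at 0x20 after an overflow, the oldest intact 16-byte vector
 * starts at (0x20 + 16) & 0xffff = 0x30, which becomes the new rptr
 * below.
 */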
3706 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3707 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3708 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3709 tmp = RREG32(IH_RB_CNTL);
3710 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3711 WREG32(IH_RB_CNTL, tmp);
3712 }
3713 return (wptr & rdev->ih.ptr_mask);
3714 }
3715
3716 /* r600 IV Ring
3717 * Each IV ring entry is 128 bits:
3718 * [7:0] - interrupt source id
3719 * [31:8] - reserved
3720 * [59:32] - interrupt source data
3721 * [127:60] - reserved
3722 *
3723 * The basic interrupt vector entries
3724 * are decoded as follows:
3725 * src_id src_data description
3726 * 1 0 D1 Vblank
3727 * 1 1 D1 Vline
3728 * 5 0 D2 Vblank
3729 * 5 1 D2 Vline
3730 * 19 0 FP Hot plug detection A
3731 * 19 1 FP Hot plug detection B
3732 * 19 2 DAC A auto-detection
3733 * 19 3 DAC B auto-detection
3734 * 21 4 HDMI block A
3735 * 21 5 HDMI block B
3736 * 176 - CP_INT RB
3737 * 177 - CP_INT IB1
3738 * 178 - CP_INT IB2
3739 * 181 - EOP Interrupt
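* 224 - DMA trap
* 230 - thermal low to high
* 231 - thermal high to low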
3740 * 233 - GUI Idle
3741 *
3742 * Note: these are based on r600 and may need to be
3743 * adjusted or extended on newer asics.
3744 */
3745
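/*
 * Minimal decode sketch (illustrative only; the real loop lives in
 * r600_irq_process() below). Assumes "ring" points at the IV ring as an
 * array of little-endian dwords and "rptr" is a byte offset into it.
 */
#if 0
static void r600_iv_decode_one(const __le32 *ring, u32 rptr)
{
u32 ring_index = rptr / 4; /* byte offset -> dword index */
u32 src_id = le32_to_cpu(ring[ring_index]) & 0xff; /* bits [7:0] */
u32 src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff; /* bits [59:32] */

DRM_DEBUG("IV entry: src_id %u, src_data %u\n", src_id, src_data);
}
#endif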
3746 int r600_irq_process(struct radeon_device *rdev)
3747 {
3748 u32 wptr;
3749 u32 rptr;
3750 u32 src_id, src_data;
3751 u32 ring_index;
3752 bool queue_hotplug = false;
3753 bool queue_hdmi = false;
3754 bool queue_thermal = false;
3755
3756 if (!rdev->ih.enabled || rdev->shutdown)
3757 return IRQ_NONE;
3758
3759 /* No MSIs, need a dummy read to flush PCI DMAs */
3760 if (!rdev->msi_enabled)
3761 RREG32(IH_RB_WPTR);
3762
3763 wptr = r600_get_ih_wptr(rdev);
3764
3765 restart_ih:
3766 /* is somebody else already processing irqs? */
3767 if (atomic_xchg(&rdev->ih.lock, 1))
3768 return IRQ_NONE;
3769
3770 rptr = rdev->ih.rptr;
3771 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3772
3773 /* Order reading of wptr vs. reading of IH ring data */
3774 rmb();
3775
3776 /* display interrupts */
3777 r600_irq_ack(rdev);
3778
3779 while (rptr != wptr) {
3780 /* wptr/rptr are in bytes! */
3781 ring_index = rptr / 4;
3782 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3783 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3784
3785 switch (src_id) {
3786 case 1: /* D1 vblank/vline */
3787 switch (src_data) {
3788 case 0: /* D1 vblank */
3789 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3790 if (rdev->irq.crtc_vblank_int[0]) {
3791 drm_handle_vblank(rdev->ddev, 0);
3792 rdev->pm.vblank_sync = true;
3793 wake_up(&rdev->irq.vblank_queue);
3794 }
3795 if (atomic_read(&rdev->irq.pflip[0]))
3796 radeon_crtc_handle_flip(rdev, 0);
3797 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3798 DRM_DEBUG("IH: D1 vblank\n");
3799 }
3800 break;
3801 case 1: /* D1 vline */
3802 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3803 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3804 DRM_DEBUG("IH: D1 vline\n");
3805 }
3806 break;
3807 default:
3808 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3809 break;
3810 }
3811 break;
3812 case 5: /* D2 vblank/vline */
3813 switch (src_data) {
3814 case 0: /* D2 vblank */
3815 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3816 if (rdev->irq.crtc_vblank_int[1]) {
3817 drm_handle_vblank(rdev->ddev, 1);
3818 rdev->pm.vblank_sync = true;
3819 wake_up(&rdev->irq.vblank_queue);
3820 }
3821 if (atomic_read(&rdev->irq.pflip[1]))
3822 radeon_crtc_handle_flip(rdev, 1);
3823 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3824 DRM_DEBUG("IH: D2 vblank\n");
3825 }
3826 break;
3827 case 1: /* D2 vline */
3828 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3829 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3830 DRM_DEBUG("IH: D2 vline\n");
3831 }
3832 break;
3833 default:
3834 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3835 break;
3836 }
3837 break;
3838 case 19: /* HPD/DAC hotplug */
3839 switch (src_data) {
3840 case 0:
3841 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3842 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3843 queue_hotplug = true;
3844 DRM_DEBUG("IH: HPD1\n");
3845 }
3846 break;
3847 case 1:
3848 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3849 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3850 queue_hotplug = true;
3851 DRM_DEBUG("IH: HPD2\n");
3852 }
3853 break;
3854 case 4:
3855 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3856 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3857 queue_hotplug = true;
3858 DRM_DEBUG("IH: HPD3\n");
3859 }
3860 break;
3861 case 5:
3862 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3863 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3864 queue_hotplug = true;
3865 DRM_DEBUG("IH: HPD4\n");
3866 }
3867 break;
3868 case 10:
3869 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3870 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3871 queue_hotplug = true;
3872 DRM_DEBUG("IH: HPD5\n");
3873 }
3874 break;
3875 case 12:
3876 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3877 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3878 queue_hotplug = true;
3879 DRM_DEBUG("IH: HPD6\n");
3880 }
3881 break;
3882 default:
3883 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3884 break;
3885 }
3886 break;
3887 case 21: /* hdmi */
3888 switch (src_data) {
3889 case 4:
3890 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3891 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3892 queue_hdmi = true;
3893 DRM_DEBUG("IH: HDMI0\n");
3894 }
3895 break;
3896 case 5:
3897 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3898 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3899 queue_hdmi = true;
3900 DRM_DEBUG("IH: HDMI1\n");
3901 }
3902 break;
3903 default:
3904 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3905 break;
3906 }
3907 break;
3908 case 176: /* CP_INT in ring buffer */
3909 case 177: /* CP_INT in IB1 */
3910 case 178: /* CP_INT in IB2 */
3911 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3912 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3913 break;
3914 case 181: /* CP EOP event */
3915 DRM_DEBUG("IH: CP EOP\n");
3916 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3917 break;
3918 case 224: /* DMA trap event */
3919 DRM_DEBUG("IH: DMA trap\n");
3920 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3921 break;
3922 case 230: /* thermal low to high */
3923 DRM_DEBUG("IH: thermal low to high\n");
3924 rdev->pm.dpm.thermal.high_to_low = false;
3925 queue_thermal = true;
3926 break;
3927 case 231: /* thermal high to low */
3928 DRM_DEBUG("IH: thermal high to low\n");
3929 rdev->pm.dpm.thermal.high_to_low = true;
3930 queue_thermal = true;
3931 break;
3932 case 233: /* GUI IDLE */
3933 DRM_DEBUG("IH: GUI idle\n");
3934 break;
3935 default:
3936 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3937 break;
3938 }
3939
3940 /* wptr/rptr are in bytes! */
3941 rptr += 16;
3942 rptr &= rdev->ih.ptr_mask;
3943 }
3944 if (queue_hotplug)
3945 schedule_work(&rdev->hotplug_work);
3946 if (queue_hdmi)
3947 schedule_work(&rdev->audio_work);
3948 if (queue_thermal && rdev->pm.dpm_enabled)
3949 schedule_work(&rdev->pm.dpm.thermal.work);
3950 rdev->ih.rptr = rptr;
3951 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3952 atomic_set(&rdev->ih.lock, 0);
3953
3954 /* make sure wptr hasn't changed while processing */
3955 wptr = r600_get_ih_wptr(rdev);
3956 if (wptr != rptr)
3957 goto restart_ih;
3958
3959 return IRQ_HANDLED;
3960 }
3961
3962 /*
3963 * Debugfs info
3964 */
3965 #if defined(CONFIG_DEBUG_FS)
3966
3967 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3968 {
3969 struct drm_info_node *node = (struct drm_info_node *) m->private;
3970 struct drm_device *dev = node->minor->dev;
3971 struct radeon_device *rdev = dev->dev_private;
3972
3973 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3974 DREG32_SYS(m, rdev, VM_L2_STATUS);
3975 return 0;
3976 }
3977
3978 static struct drm_info_list r600_mc_info_list[] = {
3979 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3980 };
3981 #endif
3982
3983 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3984 {
3985 #if defined(CONFIG_DEBUG_FS)
3986 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3987 #else
3988 return 0;
3989 #endif
3990 }
3991
3992 /**
3993 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3994 * @rdev: radeon device structure
3995 * @bo: buffer object struct which userspace is waiting for idle
3996 *
3997 * Some R6XX/R7XX chips don't seem to take an HDP flush performed
3998 * through the ring buffer into account; this leads to corruption in
3999 * rendering (see http://bugzilla.kernel.org/show_bug.cgi?id=15186).
4000 * To avoid this, we perform the HDP flush directly via an MMIO register write.
4001 */
4002 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
4003 {
4004 /* r7xx hw bug: flush via a write to HDP_DEBUG1 followed by an fb read
4005 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
4006 * This seems to cause problems on some AGP cards, so just use the old
4007 * method for them.
4008 */
4009 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
4010 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
4011 void __iomem *ptr = (void __iomem *)rdev->vram_scratch.ptr;
4012 u32 tmp;
4013
4014 WREG32(HDP_DEBUG1, 0);
4015 tmp = readl(ptr);
4016 } else
4017 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
4018 }
4019
4020 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4021 {
4022 u32 link_width_cntl, mask;
4023
4024 if (rdev->flags & RADEON_IS_IGP)
4025 return;
4026
4027 if (!(rdev->flags & RADEON_IS_PCIE))
4028 return;
4029
4030 /* x2 cards have a special sequence */
4031 if (ASIC_IS_X2(rdev))
4032 return;
4033
4034 radeon_gui_idle(rdev);
4035
4036 switch (lanes) {
4037 case 0:
4038 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
4039 break;
4040 case 1:
4041 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
4042 break;
4043 case 2:
4044 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
4045 break;
4046 case 4:
4047 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
4048 break;
4049 case 8:
4050 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
4051 break;
4052 case 12:
4053 /* not actually supported */
4054 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
4055 break;
4056 case 16:
4057 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
4058 break;
4059 default:
4060 DRM_ERROR("invalid pcie lane request: %d\n", lanes);
4061 return;
4062 }
4063
4064 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4065 link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
4066 link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
4067 link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
4068 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
4069
4070 WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4071 }
4072
4073 int r600_get_pcie_lanes(struct radeon_device *rdev)
4074 {
4075 u32 link_width_cntl;
4076
4077 if (rdev->flags & RADEON_IS_IGP)
4078 return 0;
4079
4080 if (!(rdev->flags & RADEON_IS_PCIE))
4081 return 0;
4082
4083 /* x2 cards have a special sequence */
4084 if (ASIC_IS_X2(rdev))
4085 return 0;
4086
4087 radeon_gui_idle(rdev);
4088
4089 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4090
4091 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
4092 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4093 return 1;
4094 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4095 return 2;
4096 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4097 return 4;
4098 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4099 return 8;
4100 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4101 /* not actually supported */
4102 return 12;
4103 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4104 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4105 default:
4106 return 16;
4107 }
4108 }
4109
4110 static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4111 {
4112 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
4113 u16 link_cntl2;
4114
4115 if (radeon_pcie_gen2 == 0)
4116 return;
4117
4118 if (rdev->flags & RADEON_IS_IGP)
4119 return;
4120
4121 if (!(rdev->flags & RADEON_IS_PCIE))
4122 return;
4123
4124 /* x2 cards have a special sequence */
4125 if (ASIC_IS_X2(rdev))
4126 return;
4127
4128 /* only RV6xx+ chips are supported */
4129 if (rdev->family <= CHIP_R600)
4130 return;
4131
4132 if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
4133 (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
4134 return;
4135
4136 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4137 if (speed_cntl & LC_CURRENT_DATA_RATE) {
4138 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
4139 return;
4140 }
4141
4142 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
4143
4144 /* 55 nm r6xx asics */
4145 if ((rdev->family == CHIP_RV670) ||
4146 (rdev->family == CHIP_RV620) ||
4147 (rdev->family == CHIP_RV635)) {
4148 /* advertise upconfig capability */
4149 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4150 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4151 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4152 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4153 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
4154 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
4155 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
4156 LC_RECONFIG_ARC_MISSING_ESCAPE);
4157 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
4158 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4159 } else {
4160 link_width_cntl |= LC_UPCONFIGURE_DIS;
4161 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4162 }
4163 }
4164
4165 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4166 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
4167 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
4168
4169 /* 55 nm r6xx asics */
4170 if ((rdev->family == CHIP_RV670) ||
4171 (rdev->family == CHIP_RV620) ||
4172 (rdev->family == CHIP_RV635)) {
4173 WREG32(MM_CFGREGS_CNTL, 0x8);
4174 link_cntl2 = RREG32(0x4088);
4175 WREG32(MM_CFGREGS_CNTL, 0);
4176 /* not supported yet */
4177 if (link_cntl2 & SELECTABLE_DEEMPHASIS)
4178 return;
4179 }
4180
4181 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
4182 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
4183 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
4184 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
4185 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
4186 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4187
4188 tmp = RREG32(0x541c);
4189 WREG32(0x541c, tmp | 0x8);
4190 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
4191 link_cntl2 = RREG16(0x4088);
4192 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
4193 link_cntl2 |= 0x2;
4194 WREG16(0x4088, link_cntl2);
4195 WREG32(MM_CFGREGS_CNTL, 0);
4196
4197 if ((rdev->family == CHIP_RV670) ||
4198 (rdev->family == CHIP_RV620) ||
4199 (rdev->family == CHIP_RV635)) {
4200 training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
4201 training_cntl &= ~LC_POINT_7_PLUS_EN;
4202 WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
4203 } else {
4204 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4205 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
4206 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4207 }
4208
4209 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4210 speed_cntl |= LC_GEN2_EN_STRAP;
4211 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4212
4213 } else {
4214 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4215 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
4216 if (1)
4217 link_width_cntl |= LC_UPCONFIGURE_DIS;
4218 else
4219 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4220 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4221 }
4222 }
4223
4224 /**
4225 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
4226 *
4227 * @rdev: radeon_device pointer
4228 *
4229 * Fetches a GPU clock counter snapshot (R6xx-cayman).
4230 * Returns the 64 bit clock counter snapshot.
4231 */
4232 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
4233 {
4234 uint64_t clock;
4235
4236 mutex_lock(&rdev->gpu_clock_mutex);
4237 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4238 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
4239 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4240 mutex_unlock(&rdev->gpu_clock_mutex);
4241 return clock;
4242 }
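/*
 * Illustrative usage (a sketch, not part of the driver): two snapshots
 * of the free-running counter can be differenced to measure elapsed GPU
 * clocks, e.g.
 *
 *	start = r600_get_gpu_clock_counter(rdev);
 *	... work under test ...
 *	end = r600_get_gpu_clock_counter(rdev);
 *	elapsed = end - start;
 */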