/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
/* get temperature in millidegrees */
u32 rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	u32 actual_temp = 0;

	if ((temp >> 7) & 1)
		actual_temp = 0;
	else
		actual_temp = (temp >> 1) & 0xff;

	return actual_temp * 1000;
}
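
/*
 * Note: ASIC_T is a raw on-die sensor field; the (temp >> 1) & 0xff decode
 * above appears to yield whole degrees C, and the * 1000 converts that to
 * the millidegree convention used by the kernel hwmon/thermal interfaces
 * (e.g. a decoded value of 54 is reported as 54000).
 */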
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
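
/*
 * The planned action consumed above is chosen by the dynpm state machine
 * (in radeon_pm.c): on IGP/R600 parts it steps between whole power states,
 * while on other chips it keeps one power state and steps between its clock
 * modes. Only the requested_*_index fields are written here; the actual
 * reclock is presumably performed later by the power management code.
 */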
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}
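
/*
 * Example: r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1)
 * returns the index of the second battery-type entry in the power state
 * array, or default_power_state_index if fewer than two exist. The profile
 * tables below use instance 0 for single-head and instance 1 for
 * multi-head configurations.
 */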
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
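
/*
 * Each profile entry pairs a power state index (ps_idx) with a clock mode
 * index (cm_idx) for both the dpms-off and dpms-on cases; the generic
 * profile code looks these up when the active profile changes. SH/MH here
 * denote single-head vs. multi-head display configurations.
 */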
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
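
/*
 * VOLTAGE_SW means VDDC is software-controlled via an ATOM table call;
 * caching the current value in pm.current_vddc lets us skip the
 * comparatively slow AtomBIOS round trip when the requested clock mode
 * already runs at the programmed voltage.
 */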
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
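
/*
 * DCE3 display blocks expose up to six dedicated DC_HPDn pins, while the
 * older r6xx display engine only has three DC_HOT_PLUG_DETECTn blocks;
 * both report the live pin state through a *_SENSE status bit, which the
 * helpers below also use to pick the interrupt polarity.
 */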
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}
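
/*
 * Setting rdev->irq.hpd[n] above only marks the pin as wired up; the
 * matching interrupt sources are actually unmasked by the r600_irq_set()
 * call once an IRQ handler has been installed.
 */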
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}
/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
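
/*
 * Only VM context 0 is used here and it is programmed to span exactly the
 * GTT aperture (gtt_start..gtt_end); range faults are redirected to the
 * dummy page, so a stale or bad PTE cannot scribble over system memory.
 */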
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
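
/*
 * The 0x3F00 mask (bits 8..13 of SRBM_STATUS) appears to cover the memory
 * controller busy indicators; the poll gives the MC up to
 * rdev->usec_timeout microseconds to drain before callers reprogram the
 * aperture registers.
 */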
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
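
/*
 * MC_VM_FB_LOCATION packs the VRAM range as two 16-bit fields in 16MB
 * units (top in bits 31:16, base in bits 15:0); e.g. a 512MB block at GPU
 * address 0 is programmed as 0x001F0000.
 */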
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address it has in the CPU
 * (PCI) address space, as some GPUs seem to have issues when it is
 * reprogrammed to a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside of them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case we limit VRAM or GTT.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
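
/*
 * Worked example (AGP): with a 512M GTT at 0xD0000000-0xEFFFFFFF,
 * size_bf = 0xD0000000 (space below) and size_af = 0x10000000 (space
 * above), so a 1G VRAM block goes immediately below the GTT at
 * 0x90000000 and both ranges stay contiguous in the GPU address space.
 */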
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
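
/*
 * Example: with 4 tile pipes, 2 enabled backends and no disable mask,
 * swizzle_pipe is {0,1,2,3} and the backends alternate 0,1,0,1, so the
 * 2-bit fields pack to backend_map = 0x44 (backend n%2 serves pipe n).
 */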
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
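
/*
 * This is a plain 32-bit population count (e.g. 0xb -> 3); it is used to
 * turn the inactive-pipe and backend-disable masks into counts of enabled
 * pipes and render backends.
 */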
1492 void r600_gpu_init(struct radeon_device
*rdev
)
1497 u32 cc_rb_backend_disable
;
1498 u32 cc_gc_shader_pipe_config
;
1502 u32 sq_gpr_resource_mgmt_1
= 0;
1503 u32 sq_gpr_resource_mgmt_2
= 0;
1504 u32 sq_thread_resource_mgmt
= 0;
1505 u32 sq_stack_resource_mgmt_1
= 0;
1506 u32 sq_stack_resource_mgmt_2
= 0;
1508 /* FIXME: implement */
1509 switch (rdev
->family
) {
1511 rdev
->config
.r600
.max_pipes
= 4;
1512 rdev
->config
.r600
.max_tile_pipes
= 8;
1513 rdev
->config
.r600
.max_simds
= 4;
1514 rdev
->config
.r600
.max_backends
= 4;
1515 rdev
->config
.r600
.max_gprs
= 256;
1516 rdev
->config
.r600
.max_threads
= 192;
1517 rdev
->config
.r600
.max_stack_entries
= 256;
1518 rdev
->config
.r600
.max_hw_contexts
= 8;
1519 rdev
->config
.r600
.max_gs_threads
= 16;
1520 rdev
->config
.r600
.sx_max_export_size
= 128;
1521 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1522 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1523 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1527 rdev
->config
.r600
.max_pipes
= 2;
1528 rdev
->config
.r600
.max_tile_pipes
= 2;
1529 rdev
->config
.r600
.max_simds
= 3;
1530 rdev
->config
.r600
.max_backends
= 1;
1531 rdev
->config
.r600
.max_gprs
= 128;
1532 rdev
->config
.r600
.max_threads
= 192;
1533 rdev
->config
.r600
.max_stack_entries
= 128;
1534 rdev
->config
.r600
.max_hw_contexts
= 8;
1535 rdev
->config
.r600
.max_gs_threads
= 4;
1536 rdev
->config
.r600
.sx_max_export_size
= 128;
1537 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1538 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1539 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1545 rdev
->config
.r600
.max_pipes
= 1;
1546 rdev
->config
.r600
.max_tile_pipes
= 1;
1547 rdev
->config
.r600
.max_simds
= 2;
1548 rdev
->config
.r600
.max_backends
= 1;
1549 rdev
->config
.r600
.max_gprs
= 128;
1550 rdev
->config
.r600
.max_threads
= 192;
1551 rdev
->config
.r600
.max_stack_entries
= 128;
1552 rdev
->config
.r600
.max_hw_contexts
= 4;
1553 rdev
->config
.r600
.max_gs_threads
= 4;
1554 rdev
->config
.r600
.sx_max_export_size
= 128;
1555 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1556 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1557 rdev
->config
.r600
.sq_num_cf_insts
= 1;
1560 rdev
->config
.r600
.max_pipes
= 4;
1561 rdev
->config
.r600
.max_tile_pipes
= 4;
1562 rdev
->config
.r600
.max_simds
= 4;
1563 rdev
->config
.r600
.max_backends
= 4;
1564 rdev
->config
.r600
.max_gprs
= 192;
1565 rdev
->config
.r600
.max_threads
= 192;
1566 rdev
->config
.r600
.max_stack_entries
= 256;
1567 rdev
->config
.r600
.max_hw_contexts
= 8;
1568 rdev
->config
.r600
.max_gs_threads
= 16;
1569 rdev
->config
.r600
.sx_max_export_size
= 128;
1570 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1571 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1572 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1578 /* Initialize HDP */
1579 for (i
= 0, j
= 0; i
< 32; i
++, j
+= 0x18) {
1580 WREG32((0x2c14 + j
), 0x00000000);
1581 WREG32((0x2c18 + j
), 0x00000000);
1582 WREG32((0x2c1c + j
), 0x00000000);
1583 WREG32((0x2c20 + j
), 0x00000000);
1584 WREG32((0x2c24 + j
), 0x00000000);
1587 WREG32(GRBM_CNTL
, GRBM_READ_TIMEOUT(0xff));
1591 ramcfg
= RREG32(RAMCFG
);
1592 switch (rdev
->config
.r600
.max_tile_pipes
) {
1594 tiling_config
|= PIPE_TILING(0);
1597 tiling_config
|= PIPE_TILING(1);
1600 tiling_config
|= PIPE_TILING(2);
1603 tiling_config
|= PIPE_TILING(3);
1608 rdev
->config
.r600
.tiling_npipes
= rdev
->config
.r600
.max_tile_pipes
;
1609 rdev
->config
.r600
.tiling_nbanks
= 4 << ((ramcfg
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
);
1610 tiling_config
|= BANK_TILING((ramcfg
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
);
1611 tiling_config
|= GROUP_SIZE((ramcfg
& BURSTLENGTH_MASK
) >> BURSTLENGTH_SHIFT
);
1612 if ((ramcfg
& BURSTLENGTH_MASK
) >> BURSTLENGTH_SHIFT
)
1613 rdev
->config
.r600
.tiling_group_size
= 512;
1615 rdev
->config
.r600
.tiling_group_size
= 256;
1616 tmp
= (ramcfg
& NOOFROWS_MASK
) >> NOOFROWS_SHIFT
;
1618 tiling_config
|= ROW_TILING(3);
1619 tiling_config
|= SAMPLE_SPLIT(3);
1621 tiling_config
|= ROW_TILING(tmp
);
1622 tiling_config
|= SAMPLE_SPLIT(tmp
);
1624 tiling_config
|= BANK_SWAPS(1);
1626 cc_rb_backend_disable
= RREG32(CC_RB_BACKEND_DISABLE
) & 0x00ff0000;
1627 cc_rb_backend_disable
|=
1628 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK
<< rdev
->config
.r600
.max_backends
) & R6XX_MAX_BACKENDS_MASK
);
1630 cc_gc_shader_pipe_config
= RREG32(CC_GC_SHADER_PIPE_CONFIG
) & 0xffffff00;
1631 cc_gc_shader_pipe_config
|=
1632 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK
<< rdev
->config
.r600
.max_pipes
) & R6XX_MAX_PIPES_MASK
);
1633 cc_gc_shader_pipe_config
|=
1634 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK
<< rdev
->config
.r600
.max_simds
) & R6XX_MAX_SIMDS_MASK
);
1636 backend_map
= r600_get_tile_pipe_to_backend_map(rdev
->config
.r600
.max_tile_pipes
,
1637 (R6XX_MAX_BACKENDS
-
1638 r600_count_pipe_bits((cc_rb_backend_disable
&
1639 R6XX_MAX_BACKENDS_MASK
) >> 16)),
1640 (cc_rb_backend_disable
>> 16));
1641 rdev
->config
.r600
.tile_config
= tiling_config
;
1642 tiling_config
|= BACKEND_MAP(backend_map
);
1643 WREG32(GB_TILING_CONFIG
, tiling_config
);
1644 WREG32(DCP_TILING_CONFIG
, tiling_config
& 0xffff);
1645 WREG32(HDP_TILING_CONFIG
, tiling_config
& 0xffff);
1648 WREG32(CC_RB_BACKEND_DISABLE
, cc_rb_backend_disable
);
1649 WREG32(CC_GC_SHADER_PIPE_CONFIG
, cc_gc_shader_pipe_config
);
1650 WREG32(GC_USER_SHADER_PIPE_CONFIG
, cc_gc_shader_pipe_config
);
1652 tmp
= R6XX_MAX_PIPES
- r600_count_pipe_bits((cc_gc_shader_pipe_config
& INACTIVE_QD_PIPES_MASK
) >> 8);
1653 WREG32(VGT_OUT_DEALLOC_CNTL
, (tmp
* 4) & DEALLOC_DIST_MASK
);
1654 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL
, ((tmp
* 4) - 2) & VTX_REUSE_DEPTH_MASK
);
	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256)
		tmp = 256;
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
}
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	default:
		BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}
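/* Worked example (editorial note, not from the source): with the naming
 * scheme above, CHIP_RV635 resolves chip_name = "RV635" and
 * rlc_chip_name = "R600", so the three request_firmware() calls fetch
 * "radeon/RV635_pfp.bin", "radeon/RV635_me.bin" and "radeon/R600_rlc.bin",
 * with expected sizes PFP_UCODE_SIZE * 4, PM4_UCODE_SIZE * 12 and
 * RLC_UCODE_SIZE * 4 bytes respectively.
 */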
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
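/* Illustrative sketch, not part of the driver: drm_order() yields the
 * order of the smallest power of two >= its argument, so r600_ring_init()
 * above rounds the requested size up to a power of two (doubled, in units
 * of 4 bytes).  A hypothetical stand-alone model of that arithmetic:
 */
#if 0	/* example only */
static unsigned example_align_ring_size(unsigned ring_size)
{
	unsigned order = 0;

	/* mirrors drm_order(ring_size / 8) */
	while ((1u << order) < ring_size / 8)
		order++;
	/* mirrors (1 << (rb_bufsz + 1)) * 4; e.g. 1MB in stays 1MB out */
	return (1u << (order + 1)) * 4;
}
#endif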
void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}
/*
 * GPU scratch registers helpers function.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	if (rdev->wb.use_event) {
		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(rdev, addr & 0xffffffff);
		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(rdev, fence->seq);
		radeon_ring_write(rdev, 0);
	} else {
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(rdev, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(rdev, RB_INT_STAT);
	}
}
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}
int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}
int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio resume failed\n");
		return r;
	}

	return r;
}
int r600_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}

	return 0;
}
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * from code which is overkill in most case.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}
void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}
/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}
int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}
/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the
 * CPU writing to the ring and the GPU consuming, the GPU writes to the
 * ring and the host consumes.  As the host irq handler processes
 * interrupts, it increments the rptr.  When the rptr catches up with the
 * wptr, all the current interrupts have been processed.
 */

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
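/* Illustrative sketch, not part of the driver: the consumption model the
 * comment above describes.  Because ring_size is a power of two, masking
 * with ptr_mask (ring_size - 1) wraps the byte offset for free; the real
 * consumer is r600_irq_process() further down.  Hypothetical names:
 */
#if 0	/* example only */
static void example_ih_consume(const u32 *ring, u32 rptr, u32 wptr, u32 ptr_mask)
{
	while (rptr != wptr) {			/* rptr catching up == all serviced */
		u32 src_id = ring[rptr / 4] & 0xff;

		(void)src_id;			/* decode and dispatch the vector here */
		rptr = (rptr + 16) & ptr_mask;	/* one 16-byte vector, with wraparound */
	}
}
#endif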
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}
void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
}
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}
static inline void r600_irq_ack(struct radeon_device *rdev,
				u32 *disp_int,
				u32 *disp_int_cont,
				u32 *disp_int_cont2)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = 0;
	}

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (*disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}
void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing
		 * interrupts from the last not-overwritten vector (wptr
		 * + 16).  Hopefully this lets us catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/*
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 vblank
 *      1         1  D1 vline
 *      5         0  D2 vblank
 *      5         1  D2 vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
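/* Illustrative sketch, not part of the driver: decoding one vector per the
 * layout above.  The ring is viewed as 32-bit dwords, so src_id is bits
 * [7:0] of dword 0 and src_data is bits [59:32] of the vector, i.e. bits
 * [27:0] of dword 1 -- the same masks r600_irq_process() applies below.
 */
#if 0	/* example only */
static void example_decode_iv(const u32 *ring, u32 rptr,
			      u32 *src_id, u32 *src_data)
{
	*src_id = ring[rptr / 4] & 0xff;		/* [7:0] */
	*src_data = ring[rptr / 4 + 1] & 0xfffffff;	/* [59:32] */
}
#endif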
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr = r600_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = rdev->ih.ring[ring_index] & 0xff;
		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (disp_int & LB_D1_VLINE_INTERRUPT) {
					disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (disp_int & LB_D2_VLINE_INTERRUPT) {
					disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (disp_int & DC_HPD1_INTERRUPT) {
					disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (disp_int & DC_HPD2_INTERRUPT) {
					disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (disp_int_cont & DC_HPD3_INTERRUPT) {
					disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (disp_int_cont & DC_HPD4_INTERRUPT) {
					disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* HDMI */
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * rdev: radeon device structure
 * bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
 * performed through the ring buffer; this leads to corruption in
 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this we directly perform the HDP flush by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}