/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "radeon_drm.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
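
/*
 * Dynamic power management state selection: pick which power state and
 * clock mode to request next based on the planned dynpm action.  IGP parts
 * and the original R600 step between whole power states; the other chips
 * keep one power state selected and step between its clock modes instead.
 */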
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
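
/*
 * Return the index of the instance'th power state of the requested type,
 * falling back to the default power state when no match is found.
 */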
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}
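
/*
 * Power profile tables map each profile (default, low/mid/high, single- and
 * multi-head) to a power state index and clock mode index for the DPMS on
 * and off cases.  The table is filled differently depending on how many
 * power states the BIOS exposes.
 */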
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
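
/*
 * Apply the non-clock parts of a power state change; currently this only
 * programs the requested VDDC voltage when it is software controlled and
 * differs from the currently programmed value.
 */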
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
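
/*
 * Enabling the PCIE GART programs the VM L2 cache and the per-client L1 TLB
 * controls, points VM context 0 at the GART page table in VRAM and flushes
 * the TLB before marking the GART ready for use.
 */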
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address as it appears in the
 * CPU (PCI) address space, as some GPUs have issues when VRAM is reprogrammed
 * to a different address space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, the VRAM size is limited to the aperture.
 *
 * If AGP is in use, VRAM is placed adjacent to the AGP aperture; the GPU needs
 * both ranges to be contiguous from its point of view so that accesses outside
 * of them can be caught.
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
	}
}
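
/*
 * MC init: VRAM width is derived from the channel size in RAMCFG and the
 * channel count in CHMAP; the VRAM sizes come from CONFIG_MEMSIZE and the
 * PCI aperture.
 */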
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset.  It's up to the caller to determine if the GPU needs one.  We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
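
/*
 * Build the tile pipe to render backend map: walk the (swizzled) tile pipes
 * and assign each one the next enabled backend, two bits per pipe.
 */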
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
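
/*
 * Core GPU setup: per-family limits (pipes, SIMDs, backends, GPRs, threads,
 * stack entries), tiling configuration derived from RAMCFG, and the SQ
 * resource management defaults that the 2D/3D drivers may later adjust.
 */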
1465 void r600_gpu_init(struct radeon_device
*rdev
)
1470 u32 cc_rb_backend_disable
;
1471 u32 cc_gc_shader_pipe_config
;
1475 u32 sq_gpr_resource_mgmt_1
= 0;
1476 u32 sq_gpr_resource_mgmt_2
= 0;
1477 u32 sq_thread_resource_mgmt
= 0;
1478 u32 sq_stack_resource_mgmt_1
= 0;
1479 u32 sq_stack_resource_mgmt_2
= 0;
1481 /* FIXME: implement */
1482 switch (rdev
->family
) {
1484 rdev
->config
.r600
.max_pipes
= 4;
1485 rdev
->config
.r600
.max_tile_pipes
= 8;
1486 rdev
->config
.r600
.max_simds
= 4;
1487 rdev
->config
.r600
.max_backends
= 4;
1488 rdev
->config
.r600
.max_gprs
= 256;
1489 rdev
->config
.r600
.max_threads
= 192;
1490 rdev
->config
.r600
.max_stack_entries
= 256;
1491 rdev
->config
.r600
.max_hw_contexts
= 8;
1492 rdev
->config
.r600
.max_gs_threads
= 16;
1493 rdev
->config
.r600
.sx_max_export_size
= 128;
1494 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1495 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1496 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1500 rdev
->config
.r600
.max_pipes
= 2;
1501 rdev
->config
.r600
.max_tile_pipes
= 2;
1502 rdev
->config
.r600
.max_simds
= 3;
1503 rdev
->config
.r600
.max_backends
= 1;
1504 rdev
->config
.r600
.max_gprs
= 128;
1505 rdev
->config
.r600
.max_threads
= 192;
1506 rdev
->config
.r600
.max_stack_entries
= 128;
1507 rdev
->config
.r600
.max_hw_contexts
= 8;
1508 rdev
->config
.r600
.max_gs_threads
= 4;
1509 rdev
->config
.r600
.sx_max_export_size
= 128;
1510 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1511 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1512 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1518 rdev
->config
.r600
.max_pipes
= 1;
1519 rdev
->config
.r600
.max_tile_pipes
= 1;
1520 rdev
->config
.r600
.max_simds
= 2;
1521 rdev
->config
.r600
.max_backends
= 1;
1522 rdev
->config
.r600
.max_gprs
= 128;
1523 rdev
->config
.r600
.max_threads
= 192;
1524 rdev
->config
.r600
.max_stack_entries
= 128;
1525 rdev
->config
.r600
.max_hw_contexts
= 4;
1526 rdev
->config
.r600
.max_gs_threads
= 4;
1527 rdev
->config
.r600
.sx_max_export_size
= 128;
1528 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1529 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1530 rdev
->config
.r600
.sq_num_cf_insts
= 1;
1533 rdev
->config
.r600
.max_pipes
= 4;
1534 rdev
->config
.r600
.max_tile_pipes
= 4;
1535 rdev
->config
.r600
.max_simds
= 4;
1536 rdev
->config
.r600
.max_backends
= 4;
1537 rdev
->config
.r600
.max_gprs
= 192;
1538 rdev
->config
.r600
.max_threads
= 192;
1539 rdev
->config
.r600
.max_stack_entries
= 256;
1540 rdev
->config
.r600
.max_hw_contexts
= 8;
1541 rdev
->config
.r600
.max_gs_threads
= 16;
1542 rdev
->config
.r600
.sx_max_export_size
= 128;
1543 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1544 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1545 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1551 /* Initialize HDP */
1552 for (i
= 0, j
= 0; i
< 32; i
++, j
+= 0x18) {
1553 WREG32((0x2c14 + j
), 0x00000000);
1554 WREG32((0x2c18 + j
), 0x00000000);
1555 WREG32((0x2c1c + j
), 0x00000000);
1556 WREG32((0x2c20 + j
), 0x00000000);
1557 WREG32((0x2c24 + j
), 0x00000000);
1560 WREG32(GRBM_CNTL
, GRBM_READ_TIMEOUT(0xff));
1564 ramcfg
= RREG32(RAMCFG
);
1565 switch (rdev
->config
.r600
.max_tile_pipes
) {
1567 tiling_config
|= PIPE_TILING(0);
1570 tiling_config
|= PIPE_TILING(1);
1573 tiling_config
|= PIPE_TILING(2);
1576 tiling_config
|= PIPE_TILING(3);
1581 rdev
->config
.r600
.tiling_npipes
= rdev
->config
.r600
.max_tile_pipes
;
1582 rdev
->config
.r600
.tiling_nbanks
= 4 << ((ramcfg
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
);
1583 tiling_config
|= BANK_TILING((ramcfg
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
);
1584 tiling_config
|= GROUP_SIZE(0);
1585 rdev
->config
.r600
.tiling_group_size
= 256;
1586 tmp
= (ramcfg
& NOOFROWS_MASK
) >> NOOFROWS_SHIFT
;
1588 tiling_config
|= ROW_TILING(3);
1589 tiling_config
|= SAMPLE_SPLIT(3);
1591 tiling_config
|= ROW_TILING(tmp
);
1592 tiling_config
|= SAMPLE_SPLIT(tmp
);
1594 tiling_config
|= BANK_SWAPS(1);
1596 cc_rb_backend_disable
= RREG32(CC_RB_BACKEND_DISABLE
) & 0x00ff0000;
1597 cc_rb_backend_disable
|=
1598 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK
<< rdev
->config
.r600
.max_backends
) & R6XX_MAX_BACKENDS_MASK
);
1600 cc_gc_shader_pipe_config
= RREG32(CC_GC_SHADER_PIPE_CONFIG
) & 0xffffff00;
1601 cc_gc_shader_pipe_config
|=
1602 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK
<< rdev
->config
.r600
.max_pipes
) & R6XX_MAX_PIPES_MASK
);
1603 cc_gc_shader_pipe_config
|=
1604 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK
<< rdev
->config
.r600
.max_simds
) & R6XX_MAX_SIMDS_MASK
);
1606 backend_map
= r600_get_tile_pipe_to_backend_map(rdev
->config
.r600
.max_tile_pipes
,
1607 (R6XX_MAX_BACKENDS
-
1608 r600_count_pipe_bits((cc_rb_backend_disable
&
1609 R6XX_MAX_BACKENDS_MASK
) >> 16)),
1610 (cc_rb_backend_disable
>> 16));
1612 tiling_config
|= BACKEND_MAP(backend_map
);
1613 WREG32(GB_TILING_CONFIG
, tiling_config
);
1614 WREG32(DCP_TILING_CONFIG
, tiling_config
& 0xffff);
1615 WREG32(HDP_TILING_CONFIG
, tiling_config
& 0xffff);
1618 WREG32(CC_RB_BACKEND_DISABLE
, cc_rb_backend_disable
);
1619 WREG32(CC_GC_SHADER_PIPE_CONFIG
, cc_gc_shader_pipe_config
);
1620 WREG32(GC_USER_SHADER_PIPE_CONFIG
, cc_gc_shader_pipe_config
);
1622 tmp
= R6XX_MAX_PIPES
- r600_count_pipe_bits((cc_gc_shader_pipe_config
& INACTIVE_QD_PIPES_MASK
) >> 8);
1623 WREG32(VGT_OUT_DEALLOC_CNTL
, (tmp
* 4) & DEALLOC_DIST_MASK
);
1624 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL
, ((tmp
* 4) - 2) & VTX_REUSE_DEPTH_MASK
);
1626 /* Setup some CP states */
1627 WREG32(CP_QUEUE_THRESHOLDS
, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1628 WREG32(CP_MEQ_THRESHOLDS
, (MEQ_END(0x40) | ROQ_END(0x40)));
1630 WREG32(TA_CNTL_AUX
, (DISABLE_CUBE_ANISO
| SYNC_GRADIENT
|
1631 SYNC_WALKER
| SYNC_ALIGNER
));
1632 /* Setup various GPU states */
1633 if (rdev
->family
== CHIP_RV670
)
1634 WREG32(ARB_GDEC_RD_CNTL
, 0x00000021);
1636 tmp
= RREG32(SX_DEBUG_1
);
1637 tmp
|= SMX_EVENT_RELEASE
;
1638 if ((rdev
->family
> CHIP_R600
))
1639 tmp
|= ENABLE_NEW_SMX_ADDRESS
;
1640 WREG32(SX_DEBUG_1
, tmp
);
1642 if (((rdev
->family
) == CHIP_R600
) ||
1643 ((rdev
->family
) == CHIP_RV630
) ||
1644 ((rdev
->family
) == CHIP_RV610
) ||
1645 ((rdev
->family
) == CHIP_RV620
) ||
1646 ((rdev
->family
) == CHIP_RS780
) ||
1647 ((rdev
->family
) == CHIP_RS880
)) {
1648 WREG32(DB_DEBUG
, PREZ_MUST_WAIT_FOR_POSTZ_DONE
);
1650 WREG32(DB_DEBUG
, 0);
1652 WREG32(DB_WATERMARKS
, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1653 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1655 WREG32(PA_SC_MULTI_CHIP_CNTL
, 0);
1656 WREG32(VGT_NUM_INSTANCES
, 0);
1658 WREG32(SPI_CONFIG_CNTL
, GPR_WRITE_PRIORITY(0));
1659 WREG32(SPI_CONFIG_CNTL_1
, VTX_DONE_DELAY(0));
1661 tmp
= RREG32(SQ_MS_FIFO_SIZES
);
1662 if (((rdev
->family
) == CHIP_RV610
) ||
1663 ((rdev
->family
) == CHIP_RV620
) ||
1664 ((rdev
->family
) == CHIP_RS780
) ||
1665 ((rdev
->family
) == CHIP_RS880
)) {
1666 tmp
= (CACHE_FIFO_SIZE(0xa) |
1667 FETCH_FIFO_HIWATER(0xa) |
1668 DONE_FIFO_HIWATER(0xe0) |
1669 ALU_UPDATE_FIFO_HIWATER(0x8));
1670 } else if (((rdev
->family
) == CHIP_R600
) ||
1671 ((rdev
->family
) == CHIP_RV630
)) {
1672 tmp
&= ~DONE_FIFO_HIWATER(0xff);
1673 tmp
|= DONE_FIFO_HIWATER(0x4);
1675 WREG32(SQ_MS_FIFO_SIZES
, tmp
);
	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
	sq_config |= (DX9_CONSTS |
	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}
	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}
	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);
	switch (rdev->family) {
		tmp = TC_L2_SIZE(8);
		tmp = TC_L2_SIZE(4);
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		tmp = TC_L2_SIZE(0);
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");

	switch (rdev->family) {
		rlc_chip_name = "R600";
		chip_name = "RV610";
		rlc_chip_name = "R600";
		chip_name = "RV630";
		rlc_chip_name = "R600";
		chip_name = "RV620";
		rlc_chip_name = "R600";
		chip_name = "RV635";
		rlc_chip_name = "R600";
		chip_name = "RV670";
		rlc_chip_name = "R600";
		chip_name = "RS780";
		rlc_chip_name = "R600";
		chip_name = "RV770";
		rlc_chip_name = "R700";
		chip_name = "RV730";
		rlc_chip_name = "R700";
		chip_name = "RV710";
		rlc_chip_name = "R700";
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (rdev->pfp_fw->size != pfp_req_size) {
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (rdev->me_fw->size != me_req_size) {
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (rdev->rlc_fw->size != rlc_req_size) {
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);

	platform_device_unregister(pdev);

		       "r600_cp: Failed to load firmware \"%s\"\n",
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
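/*
 * r600_cp_load_microcode() resets the CP and then writes the PM4 (ME) and
 * PFP firmware images word by word into the CP ucode RAM, leaving the
 * read/write addresses back at 0 when done.
 */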
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;

	if (!rdev->me_fw || !rdev->pfp_fw)

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
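/*
 * r600_cp_start() emits the ME_INITIALIZE packet that brings up the CP,
 * advertising the number of hardware contexts for the ASIC generation,
 * and finally writes CP_ME_CNTL.
 */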
int r600_cp_start(struct radeon_device *rdev)
{
	r = radeon_ring_lock(rdev, 7);
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_CEDAR) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	} else if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
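/*
 * r600_cp_resume() soft resets the CP, programs the ring buffer size,
 * read/write pointers and base address, then starts the CP and validates
 * it with a ring test, clearing the ready flag if the test fails.
 */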
int r600_cp_resume(struct radeon_device *rdev)
{
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
	tmp |= BUF_SWAP_32BIT;
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
		rdev->cp.ready = false;
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}

void r600_cp_fini(struct radeon_device *rdev)
{
	radeon_ring_fini(rdev);
}
/*
 * GPU scratch registers helpers function.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}
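/*
 * r600_ring_test() writes 0xCAFEDEAD to a scratch register, emits a
 * SET_CONFIG_REG packet that should overwrite it with 0xDEADBEEF, and polls
 * the register to verify that the CP actually executed the packet.
 */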
int r600_ring_test(struct radeon_device *rdev)
{
	r = radeon_scratch_get(rdev, &scratch);
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
	}
	radeon_scratch_free(rdev, scratch);
void r600_wb_disable(struct radeon_device *rdev)
{
	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
}

void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb_obj = NULL;
	}
}
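/*
 * r600_wb_enable() allocates (on first use), pins and maps the writeback
 * buffer object in GTT, then points SCRATCH_ADDR and the CP rptr address
 * registers at it so the GPU can write status data back to system memory.
 */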
int r600_wb_enable(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
	}
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
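/*
 * r600_fence_ring_emit() emits a cache flush/invalidate event, waits for the
 * 3D engine to go idle and clean, writes the fence sequence number to the
 * fence scratch register and finally raises a CP interrupt so the host
 * notices the fence.
 */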
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */
	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

bool r600_card_posted(struct radeon_device *rdev)
{
	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
	      RREG32(D2CRTC_CONTROL);

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
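/*
 * r600_startup() performs the runtime bring-up shared by init and resume:
 * load microcode if needed, program the memory controller, enable AGP or
 * the PCIE GART, initialize the GPU, blitter and IH, then start the CP
 * ring and enable writeback.
 */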
int r600_startup(struct radeon_device *rdev)
{
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
			DRM_ERROR("Failed to load firmware!\n");
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
	}
	r = r600_irq_init(rdev);
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	r = r600_cp_load_microcode(rdev);
	r = r600_cp_resume(rdev);
	/* the write back buffer is not vital so don't worry about failure */
	r600_wb_enable(rdev);
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
	WREG32(CONFIG_CNTL, temp);
}
int r600_resume(struct radeon_device *rdev)
{
	/* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);

	r = r600_startup(rdev);
		DRM_ERROR("r600 startup failed on resume\n");

	r = r600_ib_test(rdev);
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);

	r = r600_audio_init(rdev);
		DRM_ERROR("radeon: audio resume failed\n");
int r600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * does nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 */
int r600_init(struct radeon_device *rdev)
{
	r = radeon_dummy_page_init(rdev);
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
	}
	r = radeon_atombios_init(rdev);
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	r = radeon_fence_driver_init(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	/* Memory manager */
	r = radeon_bo_init(rdev);
	r = radeon_irq_kms_init(rdev);

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);

	rdev->accel_working = true;
	r = r600_startup(rdev);
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		r = r600_ib_test(rdev);
			dev_err(rdev->dev, "IB test failed (%d).\n", r);
			rdev->accel_working = false;
	}

	r = r600_audio_init(rdev);
		return r; /* TODO error handling */
void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	radeon_dummy_page_fini(rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}
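/*
 * r600_ib_test() builds a small indirect buffer that writes 0xDEADBEEF to a
 * scratch register, schedules it, waits on its fence and then polls the
 * scratch register to confirm that IB execution works.
 */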
int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;

	r = radeon_scratch_get(rdev, &scratch);
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	r = radeon_ib_schedule(rdev, ib);
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	r = radeon_fence_wait(ib->fence, false);
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
/*
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty much
 * the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
}
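/*
 * r600_ih_ring_alloc() creates the interrupt handler ring buffer object in
 * GTT on first use, pins it and maps it so the CPU can read the vectors the
 * GPU writes into it.
 */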
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
	}
}

static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
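/*
 * r600_rlc_init() stops the RLC, clears its control registers and then
 * uploads the RLC ucode image appropriate for the ASIC generation before
 * starting the RLC again.
 */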
static int r600_rlc_init(struct radeon_device *rdev)
{
	const __be32 *fw_data;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
}
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	WREG32(CP_INT_CNTL, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}
int r600_irq_init(struct radeon_device *rdev)
{
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	ret = r600_ih_ring_alloc(rdev);

	r600_disable_interrupts(rdev);

	ret = r600_rlc_init(rdev);
		r600_ih_ring_fini(rdev);

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	r600_enable_interrupts(rdev);
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
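/*
 * r600_irq_set() programs the interrupt enable bits according to the flags
 * in rdev->irq: CP software interrupts, per-crtc vblanks, the six hotplug
 * detect lines, the HDMI blocks and the GUI idle interrupt.
 */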
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}
static inline void r600_irq_ack(struct radeon_device *rdev,
				u32 *disp_int, u32 *disp_int_cont,
				u32 *disp_int_cont2)
{
	if (ASIC_IS_DCE3(rdev)) {
		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = 0;
	}

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (*disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
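/*
 * r600_get_ih_wptr() reads the current IH ring write pointer and, if the
 * ring overflowed, clears the overflow flag and advances the read pointer
 * past the oldest overwritten vectors before returning the masked wptr.
 */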
static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	/* XXX use writeback */
	wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catchup.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/*        Each IV ring entry is 128 bits:
 *        [7:0]    - interrupt source id
 *        [59:32]  - interrupt source data
 *        [127:60] - reserved
 *
 *        The basic interrupt vector entries
 *        are decoded as follows:
 *        src_id  src_data  description
 *        19         0      FP Hot plug detection A
 *        19         1      FP Hot plug detection B
 *        19         2      DAC A auto-detection
 *        19         3      DAC B auto-detection
 *        181        -      EOP Interrupt
 *
 *        Note, these are based on r600 and may need to be
 *        adjusted or added to on newer asics
 */
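/*
 * r600_irq_process() drains the IH ring: for each vector it decodes the
 * source id/data, acknowledges display interrupts, handles vblank, vline,
 * hotplug, CP and GUI-idle events, and finally updates the ring rptr and
 * schedules the hotplug work if needed.
 */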
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr = r600_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)

	spin_lock_irqsave(&rdev->ih.lock, flags);

		spin_unlock_irqrestore(&rdev->ih.lock, flags);

	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
	}

	/* display interrupts */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = rdev->ih.ring[ring_index] & 0xff;
		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		case 1: /* D1 vblank/vline */
			case 0: /* D1 vblank */
				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
			case 1: /* D1 vline */
				if (disp_int & LB_D1_VLINE_INTERRUPT) {
					disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
		case 5: /* D2 vblank/vline */
			case 0: /* D2 vblank */
				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
			case 1: /* D2 vline */
				if (disp_int & LB_D2_VLINE_INTERRUPT) {
					disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
		case 19: /* HPD/DAC hotplug */
				if (disp_int & DC_HPD1_INTERRUPT) {
					disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				if (disp_int & DC_HPD2_INTERRUPT) {
					disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				if (disp_int_cont & DC_HPD3_INTERRUPT) {
					disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				if (disp_int_cont & DC_HPD4_INTERRUPT) {
					disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);

		/* wptr/rptr are in bytes! */
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#endif
}
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX parts don't seem to take into account an HDP flush
 * performed through the ring buffer, which leads to corruption in
 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this we directly perform the HDP flush by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}