]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | */ | |
28 | #include <linux/slab.h> | |
29 | #include <linux/seq_file.h> | |
30 | #include <linux/firmware.h> | |
31 | #include <linux/platform_device.h> | |
32 | #include <linux/module.h> | |
33 | #include "drmP.h" | |
34 | #include "radeon_drm.h" | |
35 | #include "radeon.h" | |
36 | #include "radeon_asic.h" | |
37 | #include "radeon_mode.h" | |
38 | #include "r600d.h" | |
39 | #include "atom.h" | |
40 | #include "avivod.h" | |
41 | ||
42 | #define PFP_UCODE_SIZE 576 | |
43 | #define PM4_UCODE_SIZE 1792 | |
44 | #define RLC_UCODE_SIZE 768 | |
45 | #define R700_PFP_UCODE_SIZE 848 | |
46 | #define R700_PM4_UCODE_SIZE 1360 | |
47 | #define R700_RLC_UCODE_SIZE 1024 | |
48 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | |
49 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | |
50 | #define EVERGREEN_RLC_UCODE_SIZE 768 | |
51 | #define CAYMAN_RLC_UCODE_SIZE 1024 | |
52 | ||
53 | /* Firmware Names */ | |
54 | MODULE_FIRMWARE("radeon/R600_pfp.bin"); | |
55 | MODULE_FIRMWARE("radeon/R600_me.bin"); | |
56 | MODULE_FIRMWARE("radeon/RV610_pfp.bin"); | |
57 | MODULE_FIRMWARE("radeon/RV610_me.bin"); | |
58 | MODULE_FIRMWARE("radeon/RV630_pfp.bin"); | |
59 | MODULE_FIRMWARE("radeon/RV630_me.bin"); | |
60 | MODULE_FIRMWARE("radeon/RV620_pfp.bin"); | |
61 | MODULE_FIRMWARE("radeon/RV620_me.bin"); | |
62 | MODULE_FIRMWARE("radeon/RV635_pfp.bin"); | |
63 | MODULE_FIRMWARE("radeon/RV635_me.bin"); | |
64 | MODULE_FIRMWARE("radeon/RV670_pfp.bin"); | |
65 | MODULE_FIRMWARE("radeon/RV670_me.bin"); | |
66 | MODULE_FIRMWARE("radeon/RS780_pfp.bin"); | |
67 | MODULE_FIRMWARE("radeon/RS780_me.bin"); | |
68 | MODULE_FIRMWARE("radeon/RV770_pfp.bin"); | |
69 | MODULE_FIRMWARE("radeon/RV770_me.bin"); | |
70 | MODULE_FIRMWARE("radeon/RV730_pfp.bin"); | |
71 | MODULE_FIRMWARE("radeon/RV730_me.bin"); | |
72 | MODULE_FIRMWARE("radeon/RV710_pfp.bin"); | |
73 | MODULE_FIRMWARE("radeon/RV710_me.bin"); | |
74 | MODULE_FIRMWARE("radeon/R600_rlc.bin"); | |
75 | MODULE_FIRMWARE("radeon/R700_rlc.bin"); | |
76 | MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); | |
77 | MODULE_FIRMWARE("radeon/CEDAR_me.bin"); | |
78 | MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); | |
79 | MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); | |
80 | MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); | |
81 | MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); | |
82 | MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); | |
83 | MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); | |
84 | MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); | |
85 | MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); | |
86 | MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); | |
87 | MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); | |
88 | MODULE_FIRMWARE("radeon/PALM_pfp.bin"); | |
89 | MODULE_FIRMWARE("radeon/PALM_me.bin"); | |
90 | MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); | |
91 | MODULE_FIRMWARE("radeon/SUMO_pfp.bin"); | |
92 | MODULE_FIRMWARE("radeon/SUMO_me.bin"); | |
93 | MODULE_FIRMWARE("radeon/SUMO2_pfp.bin"); | |
94 | MODULE_FIRMWARE("radeon/SUMO2_me.bin"); | |
95 | ||
96 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); | |
97 | ||
98 | /* r600,rv610,rv630,rv620,rv635,rv670 */ | |
99 | int r600_mc_wait_for_idle(struct radeon_device *rdev); | |
100 | void r600_gpu_init(struct radeon_device *rdev); | |
101 | void r600_fini(struct radeon_device *rdev); | |
102 | void r600_irq_disable(struct radeon_device *rdev); | |
103 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); | |
104 | ||
105 | /* get temperature in millidegrees */ | |
106 | int rv6xx_get_temp(struct radeon_device *rdev) | |
107 | { | |
108 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> | |
109 | ASIC_T_SHIFT; | |
110 | int actual_temp = temp & 0xff; | |
111 | ||
112 | if (temp & 0x100) | |
113 | actual_temp -= 256; | |
114 | ||
115 | return actual_temp * 1000; | |
116 | } | |
117 | ||
118 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) | |
119 | { | |
120 | int i; | |
121 | ||
122 | rdev->pm.dynpm_can_upclock = true; | |
123 | rdev->pm.dynpm_can_downclock = true; | |
124 | ||
125 | /* power state array is low to high, default is first */ | |
126 | if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) { | |
127 | int min_power_state_index = 0; | |
128 | ||
129 | if (rdev->pm.num_power_states > 2) | |
130 | min_power_state_index = 1; | |
131 | ||
132 | switch (rdev->pm.dynpm_planned_action) { | |
133 | case DYNPM_ACTION_MINIMUM: | |
134 | rdev->pm.requested_power_state_index = min_power_state_index; | |
135 | rdev->pm.requested_clock_mode_index = 0; | |
136 | rdev->pm.dynpm_can_downclock = false; | |
137 | break; | |
138 | case DYNPM_ACTION_DOWNCLOCK: | |
139 | if (rdev->pm.current_power_state_index == min_power_state_index) { | |
140 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; | |
141 | rdev->pm.dynpm_can_downclock = false; | |
142 | } else { | |
143 | if (rdev->pm.active_crtc_count > 1) { | |
144 | for (i = 0; i < rdev->pm.num_power_states; i++) { | |
145 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | |
146 | continue; | |
147 | else if (i >= rdev->pm.current_power_state_index) { | |
148 | rdev->pm.requested_power_state_index = | |
149 | rdev->pm.current_power_state_index; | |
150 | break; | |
151 | } else { | |
152 | rdev->pm.requested_power_state_index = i; | |
153 | break; | |
154 | } | |
155 | } | |
156 | } else { | |
157 | if (rdev->pm.current_power_state_index == 0) | |
158 | rdev->pm.requested_power_state_index = | |
159 | rdev->pm.num_power_states - 1; | |
160 | else | |
161 | rdev->pm.requested_power_state_index = | |
162 | rdev->pm.current_power_state_index - 1; | |
163 | } | |
164 | } | |
165 | rdev->pm.requested_clock_mode_index = 0; | |
166 | /* don't use the power state if crtcs are active and no display flag is set */ | |
167 | if ((rdev->pm.active_crtc_count > 0) && | |
168 | (rdev->pm.power_state[rdev->pm.requested_power_state_index]. | |
169 | clock_info[rdev->pm.requested_clock_mode_index].flags & | |
170 | RADEON_PM_MODE_NO_DISPLAY)) { | |
171 | rdev->pm.requested_power_state_index++; | |
172 | } | |
173 | break; | |
174 | case DYNPM_ACTION_UPCLOCK: | |
175 | if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { | |
176 | rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; | |
177 | rdev->pm.dynpm_can_upclock = false; | |
178 | } else { | |
179 | if (rdev->pm.active_crtc_count > 1) { | |
180 | for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { | |
181 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | |
182 | continue; | |
183 | else if (i <= rdev->pm.current_power_state_index) { | |
184 | rdev->pm.requested_power_state_index = | |
185 | rdev->pm.current_power_state_index; | |
186 | break; | |
187 | } else { | |
188 | rdev->pm.requested_power_state_index = i; | |
189 | break; | |
190 | } | |
191 | } | |
192 | } else | |
193 | rdev->pm.requested_power_state_index = | |
194 | rdev->pm.current_power_state_index + 1; | |
195 | } | |
196 | rdev->pm.requested_clock_mode_index = 0; | |
197 | break; | |
198 | case DYNPM_ACTION_DEFAULT: | |
199 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; | |
200 | rdev->pm.requested_clock_mode_index = 0; | |
201 | rdev->pm.dynpm_can_upclock = false; | |
202 | break; | |
203 | case DYNPM_ACTION_NONE: | |
204 | default: | |
205 | DRM_ERROR("Requested mode for not defined action\n"); | |
206 | return; | |
207 | } | |
208 | } else { | |
209 | /* XXX select a power state based on AC/DC, single/dualhead, etc. */ | |
210 | /* for now just select the first power state and switch between clock modes */ | |
211 | /* power state array is low to high, default is first (0) */ | |
212 | if (rdev->pm.active_crtc_count > 1) { | |
213 | rdev->pm.requested_power_state_index = -1; | |
214 | /* start at 1 as we don't want the default mode */ | |
215 | for (i = 1; i < rdev->pm.num_power_states; i++) { | |
216 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | |
217 | continue; | |
218 | else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) || | |
219 | (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) { | |
220 | rdev->pm.requested_power_state_index = i; | |
221 | break; | |
222 | } | |
223 | } | |
224 | /* if nothing selected, grab the default state. */ | |
225 | if (rdev->pm.requested_power_state_index == -1) | |
226 | rdev->pm.requested_power_state_index = 0; | |
227 | } else | |
228 | rdev->pm.requested_power_state_index = 1; | |
229 | ||
230 | switch (rdev->pm.dynpm_planned_action) { | |
231 | case DYNPM_ACTION_MINIMUM: | |
232 | rdev->pm.requested_clock_mode_index = 0; | |
233 | rdev->pm.dynpm_can_downclock = false; | |
234 | break; | |
235 | case DYNPM_ACTION_DOWNCLOCK: | |
236 | if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { | |
237 | if (rdev->pm.current_clock_mode_index == 0) { | |
238 | rdev->pm.requested_clock_mode_index = 0; | |
239 | rdev->pm.dynpm_can_downclock = false; | |
240 | } else | |
241 | rdev->pm.requested_clock_mode_index = | |
242 | rdev->pm.current_clock_mode_index - 1; | |
243 | } else { | |
244 | rdev->pm.requested_clock_mode_index = 0; | |
245 | rdev->pm.dynpm_can_downclock = false; | |
246 | } | |
247 | /* don't use the power state if crtcs are active and no display flag is set */ | |
248 | if ((rdev->pm.active_crtc_count > 0) && | |
249 | (rdev->pm.power_state[rdev->pm.requested_power_state_index]. | |
250 | clock_info[rdev->pm.requested_clock_mode_index].flags & | |
251 | RADEON_PM_MODE_NO_DISPLAY)) { | |
252 | rdev->pm.requested_clock_mode_index++; | |
253 | } | |
254 | break; | |
255 | case DYNPM_ACTION_UPCLOCK: | |
256 | if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { | |
257 | if (rdev->pm.current_clock_mode_index == | |
258 | (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) { | |
259 | rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index; | |
260 | rdev->pm.dynpm_can_upclock = false; | |
261 | } else | |
262 | rdev->pm.requested_clock_mode_index = | |
263 | rdev->pm.current_clock_mode_index + 1; | |
264 | } else { | |
265 | rdev->pm.requested_clock_mode_index = | |
266 | rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1; | |
267 | rdev->pm.dynpm_can_upclock = false; | |
268 | } | |
269 | break; | |
270 | case DYNPM_ACTION_DEFAULT: | |
271 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; | |
272 | rdev->pm.requested_clock_mode_index = 0; | |
273 | rdev->pm.dynpm_can_upclock = false; | |
274 | break; | |
275 | case DYNPM_ACTION_NONE: | |
276 | default: | |
277 | DRM_ERROR("Requested mode for not defined action\n"); | |
278 | return; | |
279 | } | |
280 | } | |
281 | ||
282 | DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n", | |
283 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | |
284 | clock_info[rdev->pm.requested_clock_mode_index].sclk, | |
285 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | |
286 | clock_info[rdev->pm.requested_clock_mode_index].mclk, | |
287 | rdev->pm.power_state[rdev->pm.requested_power_state_index]. | |
288 | pcie_lanes); | |
289 | } | |
290 | ||
291 | void rs780_pm_init_profile(struct radeon_device *rdev) | |
292 | { | |
293 | if (rdev->pm.num_power_states == 2) { | |
294 | /* default */ | |
295 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
296 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
297 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | |
298 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | |
299 | /* low sh */ | |
300 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; | |
301 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; | |
302 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | |
303 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | |
304 | /* mid sh */ | |
305 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; | |
306 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; | |
307 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | |
308 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | |
309 | /* high sh */ | |
310 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; | |
311 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; | |
312 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | |
313 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | |
314 | /* low mh */ | |
315 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; | |
316 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; | |
317 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | |
318 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | |
319 | /* mid mh */ | |
320 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; | |
321 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; | |
322 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | |
323 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | |
324 | /* high mh */ | |
325 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; | |
326 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; | |
327 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | |
328 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | |
329 | } else if (rdev->pm.num_power_states == 3) { | |
330 | /* default */ | |
331 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
332 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
333 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | |
334 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | |
335 | /* low sh */ | |
336 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; | |
337 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; | |
338 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | |
339 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | |
340 | /* mid sh */ | |
341 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; | |
342 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; | |
343 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | |
344 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | |
345 | /* high sh */ | |
346 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; | |
347 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; | |
348 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | |
349 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | |
350 | /* low mh */ | |
351 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; | |
352 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; | |
353 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | |
354 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | |
355 | /* mid mh */ | |
356 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; | |
357 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; | |
358 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | |
359 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | |
360 | /* high mh */ | |
361 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; | |
362 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; | |
363 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | |
364 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | |
365 | } else { | |
366 | /* default */ | |
367 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
368 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
369 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | |
370 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | |
371 | /* low sh */ | |
372 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; | |
373 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; | |
374 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | |
375 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | |
376 | /* mid sh */ | |
377 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; | |
378 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; | |
379 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | |
380 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | |
381 | /* high sh */ | |
382 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; | |
383 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; | |
384 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | |
385 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | |
386 | /* low mh */ | |
387 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; | |
388 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; | |
389 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | |
390 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | |
391 | /* mid mh */ | |
392 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; | |
393 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; | |
394 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | |
395 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | |
396 | /* high mh */ | |
397 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; | |
398 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; | |
399 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | |
400 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | |
401 | } | |
402 | } | |
403 | ||
404 | void r600_pm_init_profile(struct radeon_device *rdev) | |
405 | { | |
406 | int idx; | |
407 | ||
408 | if (rdev->family == CHIP_R600) { | |
409 | /* XXX */ | |
410 | /* default */ | |
411 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
412 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
413 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | |
414 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | |
415 | /* low sh */ | |
416 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
417 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
418 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | |
419 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | |
420 | /* mid sh */ | |
421 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
422 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
423 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | |
424 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | |
425 | /* high sh */ | |
426 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
427 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
428 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | |
429 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; | |
430 | /* low mh */ | |
431 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
432 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
433 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | |
434 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | |
435 | /* mid mh */ | |
436 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
437 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
438 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | |
439 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | |
440 | /* high mh */ | |
441 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
442 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
443 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | |
444 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; | |
445 | } else { | |
446 | if (rdev->pm.num_power_states < 4) { | |
447 | /* default */ | |
448 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
449 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
450 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | |
451 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; | |
452 | /* low sh */ | |
453 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; | |
454 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; | |
455 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | |
456 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | |
457 | /* mid sh */ | |
458 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; | |
459 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; | |
460 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | |
461 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | |
462 | /* high sh */ | |
463 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; | |
464 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; | |
465 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | |
466 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; | |
467 | /* low mh */ | |
468 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; | |
469 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; | |
470 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | |
471 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | |
472 | /* low mh */ | |
473 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; | |
474 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; | |
475 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | |
476 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | |
477 | /* high mh */ | |
478 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; | |
479 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; | |
480 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | |
481 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; | |
482 | } else { | |
483 | /* default */ | |
484 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | |
485 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | |
486 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | |
487 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; | |
488 | /* low sh */ | |
489 | if (rdev->flags & RADEON_IS_MOBILITY) | |
490 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | |
491 | else | |
492 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | |
493 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; | |
494 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; | |
495 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | |
496 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | |
497 | /* mid sh */ | |
498 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; | |
499 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; | |
500 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | |
501 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | |
502 | /* high sh */ | |
503 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | |
504 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; | |
505 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; | |
506 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | |
507 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; | |
508 | /* low mh */ | |
509 | if (rdev->flags & RADEON_IS_MOBILITY) | |
510 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | |
511 | else | |
512 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | |
513 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; | |
514 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; | |
515 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | |
516 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | |
517 | /* mid mh */ | |
518 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; | |
519 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; | |
520 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | |
521 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | |
522 | /* high mh */ | |
523 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | |
524 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; | |
525 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; | |
526 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | |
527 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; | |
528 | } | |
529 | } | |
530 | } | |
531 | ||
532 | void r600_pm_misc(struct radeon_device *rdev) | |
533 | { | |
534 | int req_ps_idx = rdev->pm.requested_power_state_index; | |
535 | int req_cm_idx = rdev->pm.requested_clock_mode_index; | |
536 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; | |
537 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | |
538 | ||
539 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | |
540 | /* 0xff01 is a flag rather then an actual voltage */ | |
541 | if (voltage->voltage == 0xff01) | |
542 | return; | |
543 | if (voltage->voltage != rdev->pm.current_vddc) { | |
544 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); | |
545 | rdev->pm.current_vddc = voltage->voltage; | |
546 | DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); | |
547 | } | |
548 | } | |
549 | } | |
550 | ||
551 | bool r600_gui_idle(struct radeon_device *rdev) | |
552 | { | |
553 | if (RREG32(GRBM_STATUS) & GUI_ACTIVE) | |
554 | return false; | |
555 | else | |
556 | return true; | |
557 | } | |
558 | ||
559 | /* hpd for digital panel detect/disconnect */ | |
560 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | |
561 | { | |
562 | bool connected = false; | |
563 | ||
564 | if (ASIC_IS_DCE3(rdev)) { | |
565 | switch (hpd) { | |
566 | case RADEON_HPD_1: | |
567 | if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) | |
568 | connected = true; | |
569 | break; | |
570 | case RADEON_HPD_2: | |
571 | if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) | |
572 | connected = true; | |
573 | break; | |
574 | case RADEON_HPD_3: | |
575 | if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) | |
576 | connected = true; | |
577 | break; | |
578 | case RADEON_HPD_4: | |
579 | if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) | |
580 | connected = true; | |
581 | break; | |
582 | /* DCE 3.2 */ | |
583 | case RADEON_HPD_5: | |
584 | if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) | |
585 | connected = true; | |
586 | break; | |
587 | case RADEON_HPD_6: | |
588 | if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) | |
589 | connected = true; | |
590 | break; | |
591 | default: | |
592 | break; | |
593 | } | |
594 | } else { | |
595 | switch (hpd) { | |
596 | case RADEON_HPD_1: | |
597 | if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) | |
598 | connected = true; | |
599 | break; | |
600 | case RADEON_HPD_2: | |
601 | if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) | |
602 | connected = true; | |
603 | break; | |
604 | case RADEON_HPD_3: | |
605 | if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) | |
606 | connected = true; | |
607 | break; | |
608 | default: | |
609 | break; | |
610 | } | |
611 | } | |
612 | return connected; | |
613 | } | |
614 | ||
615 | void r600_hpd_set_polarity(struct radeon_device *rdev, | |
616 | enum radeon_hpd_id hpd) | |
617 | { | |
618 | u32 tmp; | |
619 | bool connected = r600_hpd_sense(rdev, hpd); | |
620 | ||
621 | if (ASIC_IS_DCE3(rdev)) { | |
622 | switch (hpd) { | |
623 | case RADEON_HPD_1: | |
624 | tmp = RREG32(DC_HPD1_INT_CONTROL); | |
625 | if (connected) | |
626 | tmp &= ~DC_HPDx_INT_POLARITY; | |
627 | else | |
628 | tmp |= DC_HPDx_INT_POLARITY; | |
629 | WREG32(DC_HPD1_INT_CONTROL, tmp); | |
630 | break; | |
631 | case RADEON_HPD_2: | |
632 | tmp = RREG32(DC_HPD2_INT_CONTROL); | |
633 | if (connected) | |
634 | tmp &= ~DC_HPDx_INT_POLARITY; | |
635 | else | |
636 | tmp |= DC_HPDx_INT_POLARITY; | |
637 | WREG32(DC_HPD2_INT_CONTROL, tmp); | |
638 | break; | |
639 | case RADEON_HPD_3: | |
640 | tmp = RREG32(DC_HPD3_INT_CONTROL); | |
641 | if (connected) | |
642 | tmp &= ~DC_HPDx_INT_POLARITY; | |
643 | else | |
644 | tmp |= DC_HPDx_INT_POLARITY; | |
645 | WREG32(DC_HPD3_INT_CONTROL, tmp); | |
646 | break; | |
647 | case RADEON_HPD_4: | |
648 | tmp = RREG32(DC_HPD4_INT_CONTROL); | |
649 | if (connected) | |
650 | tmp &= ~DC_HPDx_INT_POLARITY; | |
651 | else | |
652 | tmp |= DC_HPDx_INT_POLARITY; | |
653 | WREG32(DC_HPD4_INT_CONTROL, tmp); | |
654 | break; | |
655 | case RADEON_HPD_5: | |
656 | tmp = RREG32(DC_HPD5_INT_CONTROL); | |
657 | if (connected) | |
658 | tmp &= ~DC_HPDx_INT_POLARITY; | |
659 | else | |
660 | tmp |= DC_HPDx_INT_POLARITY; | |
661 | WREG32(DC_HPD5_INT_CONTROL, tmp); | |
662 | break; | |
663 | /* DCE 3.2 */ | |
664 | case RADEON_HPD_6: | |
665 | tmp = RREG32(DC_HPD6_INT_CONTROL); | |
666 | if (connected) | |
667 | tmp &= ~DC_HPDx_INT_POLARITY; | |
668 | else | |
669 | tmp |= DC_HPDx_INT_POLARITY; | |
670 | WREG32(DC_HPD6_INT_CONTROL, tmp); | |
671 | break; | |
672 | default: | |
673 | break; | |
674 | } | |
675 | } else { | |
676 | switch (hpd) { | |
677 | case RADEON_HPD_1: | |
678 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); | |
679 | if (connected) | |
680 | tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
681 | else | |
682 | tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
683 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | |
684 | break; | |
685 | case RADEON_HPD_2: | |
686 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); | |
687 | if (connected) | |
688 | tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
689 | else | |
690 | tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
691 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | |
692 | break; | |
693 | case RADEON_HPD_3: | |
694 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); | |
695 | if (connected) | |
696 | tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
697 | else | |
698 | tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
699 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); | |
700 | break; | |
701 | default: | |
702 | break; | |
703 | } | |
704 | } | |
705 | } | |
706 | ||
707 | void r600_hpd_init(struct radeon_device *rdev) | |
708 | { | |
709 | struct drm_device *dev = rdev->ddev; | |
710 | struct drm_connector *connector; | |
711 | ||
712 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | |
713 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | |
714 | ||
715 | if (ASIC_IS_DCE3(rdev)) { | |
716 | u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); | |
717 | if (ASIC_IS_DCE32(rdev)) | |
718 | tmp |= DC_HPDx_EN; | |
719 | ||
720 | switch (radeon_connector->hpd.hpd) { | |
721 | case RADEON_HPD_1: | |
722 | WREG32(DC_HPD1_CONTROL, tmp); | |
723 | rdev->irq.hpd[0] = true; | |
724 | break; | |
725 | case RADEON_HPD_2: | |
726 | WREG32(DC_HPD2_CONTROL, tmp); | |
727 | rdev->irq.hpd[1] = true; | |
728 | break; | |
729 | case RADEON_HPD_3: | |
730 | WREG32(DC_HPD3_CONTROL, tmp); | |
731 | rdev->irq.hpd[2] = true; | |
732 | break; | |
733 | case RADEON_HPD_4: | |
734 | WREG32(DC_HPD4_CONTROL, tmp); | |
735 | rdev->irq.hpd[3] = true; | |
736 | break; | |
737 | /* DCE 3.2 */ | |
738 | case RADEON_HPD_5: | |
739 | WREG32(DC_HPD5_CONTROL, tmp); | |
740 | rdev->irq.hpd[4] = true; | |
741 | break; | |
742 | case RADEON_HPD_6: | |
743 | WREG32(DC_HPD6_CONTROL, tmp); | |
744 | rdev->irq.hpd[5] = true; | |
745 | break; | |
746 | default: | |
747 | break; | |
748 | } | |
749 | } else { | |
750 | switch (radeon_connector->hpd.hpd) { | |
751 | case RADEON_HPD_1: | |
752 | WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); | |
753 | rdev->irq.hpd[0] = true; | |
754 | break; | |
755 | case RADEON_HPD_2: | |
756 | WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN); | |
757 | rdev->irq.hpd[1] = true; | |
758 | break; | |
759 | case RADEON_HPD_3: | |
760 | WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN); | |
761 | rdev->irq.hpd[2] = true; | |
762 | break; | |
763 | default: | |
764 | break; | |
765 | } | |
766 | } | |
767 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | |
768 | } | |
769 | if (rdev->irq.installed) | |
770 | r600_irq_set(rdev); | |
771 | } | |
772 | ||
773 | void r600_hpd_fini(struct radeon_device *rdev) | |
774 | { | |
775 | struct drm_device *dev = rdev->ddev; | |
776 | struct drm_connector *connector; | |
777 | ||
778 | if (ASIC_IS_DCE3(rdev)) { | |
779 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | |
780 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | |
781 | switch (radeon_connector->hpd.hpd) { | |
782 | case RADEON_HPD_1: | |
783 | WREG32(DC_HPD1_CONTROL, 0); | |
784 | rdev->irq.hpd[0] = false; | |
785 | break; | |
786 | case RADEON_HPD_2: | |
787 | WREG32(DC_HPD2_CONTROL, 0); | |
788 | rdev->irq.hpd[1] = false; | |
789 | break; | |
790 | case RADEON_HPD_3: | |
791 | WREG32(DC_HPD3_CONTROL, 0); | |
792 | rdev->irq.hpd[2] = false; | |
793 | break; | |
794 | case RADEON_HPD_4: | |
795 | WREG32(DC_HPD4_CONTROL, 0); | |
796 | rdev->irq.hpd[3] = false; | |
797 | break; | |
798 | /* DCE 3.2 */ | |
799 | case RADEON_HPD_5: | |
800 | WREG32(DC_HPD5_CONTROL, 0); | |
801 | rdev->irq.hpd[4] = false; | |
802 | break; | |
803 | case RADEON_HPD_6: | |
804 | WREG32(DC_HPD6_CONTROL, 0); | |
805 | rdev->irq.hpd[5] = false; | |
806 | break; | |
807 | default: | |
808 | break; | |
809 | } | |
810 | } | |
811 | } else { | |
812 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | |
813 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | |
814 | switch (radeon_connector->hpd.hpd) { | |
815 | case RADEON_HPD_1: | |
816 | WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0); | |
817 | rdev->irq.hpd[0] = false; | |
818 | break; | |
819 | case RADEON_HPD_2: | |
820 | WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0); | |
821 | rdev->irq.hpd[1] = false; | |
822 | break; | |
823 | case RADEON_HPD_3: | |
824 | WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0); | |
825 | rdev->irq.hpd[2] = false; | |
826 | break; | |
827 | default: | |
828 | break; | |
829 | } | |
830 | } | |
831 | } | |
832 | } | |
833 | ||
834 | /* | |
835 | * R600 PCIE GART | |
836 | */ | |
837 | void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) | |
838 | { | |
839 | unsigned i; | |
840 | u32 tmp; | |
841 | ||
842 | /* flush hdp cache so updates hit vram */ | |
843 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && | |
844 | !(rdev->flags & RADEON_IS_AGP)) { | |
845 | void __iomem *ptr = (void *)rdev->gart.ptr; | |
846 | u32 tmp; | |
847 | ||
848 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | |
849 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | |
850 | * This seems to cause problems on some AGP cards. Just use the old | |
851 | * method for them. | |
852 | */ | |
853 | WREG32(HDP_DEBUG1, 0); | |
854 | tmp = readl((void __iomem *)ptr); | |
855 | } else | |
856 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | |
857 | ||
858 | WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); | |
859 | WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); | |
860 | WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); | |
861 | for (i = 0; i < rdev->usec_timeout; i++) { | |
862 | /* read MC_STATUS */ | |
863 | tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE); | |
864 | tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT; | |
865 | if (tmp == 2) { | |
866 | printk(KERN_WARNING "[drm] r600 flush TLB failed\n"); | |
867 | return; | |
868 | } | |
869 | if (tmp) { | |
870 | return; | |
871 | } | |
872 | udelay(1); | |
873 | } | |
874 | } | |
875 | ||
876 | int r600_pcie_gart_init(struct radeon_device *rdev) | |
877 | { | |
878 | int r; | |
879 | ||
880 | if (rdev->gart.robj) { | |
881 | WARN(1, "R600 PCIE GART already initialized\n"); | |
882 | return 0; | |
883 | } | |
884 | /* Initialize common gart structure */ | |
885 | r = radeon_gart_init(rdev); | |
886 | if (r) | |
887 | return r; | |
888 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; | |
889 | return radeon_gart_table_vram_alloc(rdev); | |
890 | } | |
891 | ||
892 | int r600_pcie_gart_enable(struct radeon_device *rdev) | |
893 | { | |
894 | u32 tmp; | |
895 | int r, i; | |
896 | ||
897 | if (rdev->gart.robj == NULL) { | |
898 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); | |
899 | return -EINVAL; | |
900 | } | |
901 | r = radeon_gart_table_vram_pin(rdev); | |
902 | if (r) | |
903 | return r; | |
904 | radeon_gart_restore(rdev); | |
905 | ||
906 | /* Setup L2 cache */ | |
907 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | | |
908 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | | |
909 | EFFECTIVE_L2_QUEUE_SIZE(7)); | |
910 | WREG32(VM_L2_CNTL2, 0); | |
911 | WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); | |
912 | /* Setup TLB control */ | |
913 | tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | | |
914 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | |
915 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | | |
916 | ENABLE_WAIT_L2_QUERY; | |
917 | WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); | |
918 | WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); | |
919 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); | |
920 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | |
921 | WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); | |
922 | WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); | |
923 | WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); | |
924 | WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); | |
925 | WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); | |
926 | WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); | |
927 | WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); | |
928 | WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); | |
929 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | |
930 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | |
931 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | |
932 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); | |
933 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | |
934 | WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | |
935 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); | |
936 | WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | |
937 | (u32)(rdev->dummy_page.addr >> 12)); | |
938 | for (i = 1; i < 7; i++) | |
939 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | |
940 | ||
941 | r600_pcie_gart_tlb_flush(rdev); | |
942 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | |
943 | (unsigned)(rdev->mc.gtt_size >> 20), | |
944 | (unsigned long long)rdev->gart.table_addr); | |
945 | rdev->gart.ready = true; | |
946 | return 0; | |
947 | } | |
948 | ||
949 | void r600_pcie_gart_disable(struct radeon_device *rdev) | |
950 | { | |
951 | u32 tmp; | |
952 | int i; | |
953 | ||
954 | /* Disable all tables */ | |
955 | for (i = 0; i < 7; i++) | |
956 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | |
957 | ||
958 | /* Disable L2 cache */ | |
959 | WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | | |
960 | EFFECTIVE_L2_QUEUE_SIZE(7)); | |
961 | WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); | |
962 | /* Setup L1 TLB control */ | |
963 | tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | | |
964 | ENABLE_WAIT_L2_QUERY; | |
965 | WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); | |
966 | WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); | |
967 | WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); | |
968 | WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); | |
969 | WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); | |
970 | WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); | |
971 | WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); | |
972 | WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); | |
973 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp); | |
974 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp); | |
975 | WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); | |
976 | WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); | |
977 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); | |
978 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | |
979 | radeon_gart_table_vram_unpin(rdev); | |
980 | } | |
981 | ||
982 | void r600_pcie_gart_fini(struct radeon_device *rdev) | |
983 | { | |
984 | radeon_gart_fini(rdev); | |
985 | r600_pcie_gart_disable(rdev); | |
986 | radeon_gart_table_vram_free(rdev); | |
987 | } | |
988 | ||
989 | void r600_agp_enable(struct radeon_device *rdev) | |
990 | { | |
991 | u32 tmp; | |
992 | int i; | |
993 | ||
994 | /* Setup L2 cache */ | |
995 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | | |
996 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | | |
997 | EFFECTIVE_L2_QUEUE_SIZE(7)); | |
998 | WREG32(VM_L2_CNTL2, 0); | |
999 | WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); | |
1000 | /* Setup TLB control */ | |
1001 | tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | | |
1002 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | |
1003 | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | | |
1004 | ENABLE_WAIT_L2_QUERY; | |
1005 | WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); | |
1006 | WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); | |
1007 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); | |
1008 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | |
1009 | WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); | |
1010 | WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); | |
1011 | WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); | |
1012 | WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); | |
1013 | WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); | |
1014 | WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); | |
1015 | WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); | |
1016 | WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); | |
1017 | WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | |
1018 | WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); | |
1019 | for (i = 0; i < 7; i++) | |
1020 | WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); | |
1021 | } | |
1022 | ||
1023 | int r600_mc_wait_for_idle(struct radeon_device *rdev) | |
1024 | { | |
1025 | unsigned i; | |
1026 | u32 tmp; | |
1027 | ||
1028 | for (i = 0; i < rdev->usec_timeout; i++) { | |
1029 | /* read MC_STATUS */ | |
1030 | tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00; | |
1031 | if (!tmp) | |
1032 | return 0; | |
1033 | udelay(1); | |
1034 | } | |
1035 | return -1; | |
1036 | } | |
1037 | ||
1038 | static void r600_mc_program(struct radeon_device *rdev) | |
1039 | { | |
1040 | struct rv515_mc_save save; | |
1041 | u32 tmp; | |
1042 | int i, j; | |
1043 | ||
1044 | /* Initialize HDP */ | |
1045 | for (i = 0, j = 0; i < 32; i++, j += 0x18) { | |
1046 | WREG32((0x2c14 + j), 0x00000000); | |
1047 | WREG32((0x2c18 + j), 0x00000000); | |
1048 | WREG32((0x2c1c + j), 0x00000000); | |
1049 | WREG32((0x2c20 + j), 0x00000000); | |
1050 | WREG32((0x2c24 + j), 0x00000000); | |
1051 | } | |
1052 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); | |
1053 | ||
1054 | rv515_mc_stop(rdev, &save); | |
1055 | if (r600_mc_wait_for_idle(rdev)) { | |
1056 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); | |
1057 | } | |
1058 | /* Lockout access through VGA aperture (doesn't exist before R600) */ | |
1059 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); | |
1060 | /* Update configuration */ | |
1061 | if (rdev->flags & RADEON_IS_AGP) { | |
1062 | if (rdev->mc.vram_start < rdev->mc.gtt_start) { | |
1063 | /* VRAM before AGP */ | |
1064 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | |
1065 | rdev->mc.vram_start >> 12); | |
1066 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | |
1067 | rdev->mc.gtt_end >> 12); | |
1068 | } else { | |
1069 | /* VRAM after AGP */ | |
1070 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | |
1071 | rdev->mc.gtt_start >> 12); | |
1072 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | |
1073 | rdev->mc.vram_end >> 12); | |
1074 | } | |
1075 | } else { | |
1076 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | |
1077 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); | |
1078 | } | |
1079 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); | |
1080 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; | |
1081 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | |
1082 | WREG32(MC_VM_FB_LOCATION, tmp); | |
1083 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | |
1084 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | |
1085 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); | |
1086 | if (rdev->flags & RADEON_IS_AGP) { | |
1087 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); | |
1088 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); | |
1089 | WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); | |
1090 | } else { | |
1091 | WREG32(MC_VM_AGP_BASE, 0); | |
1092 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); | |
1093 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); | |
1094 | } | |
1095 | if (r600_mc_wait_for_idle(rdev)) { | |
1096 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); | |
1097 | } | |
1098 | rv515_mc_resume(rdev, &save); | |
1099 | /* we need to own VRAM, so turn off the VGA renderer here | |
1100 | * to stop it overwriting our objects */ | |
1101 | rv515_vga_render_disable(rdev); | |
1102 | } | |
1103 | ||
1104 | /** | |
1105 | * r600_vram_gtt_location - try to find VRAM & GTT location | |
1106 | * @rdev: radeon device structure holding all necessary informations | |
1107 | * @mc: memory controller structure holding memory informations | |
1108 | * | |
1109 | * Function will place try to place VRAM at same place as in CPU (PCI) | |
1110 | * address space as some GPU seems to have issue when we reprogram at | |
1111 | * different address space. | |
1112 | * | |
1113 | * If there is not enough space to fit the unvisible VRAM after the | |
1114 | * aperture then we limit the VRAM size to the aperture. | |
1115 | * | |
1116 | * If we are using AGP then place VRAM adjacent to AGP aperture are we need | |
1117 | * them to be in one from GPU point of view so that we can program GPU to | |
1118 | * catch access outside them (weird GPU policy see ??). | |
1119 | * | |
1120 | * This function will never fails, worst case are limiting VRAM or GTT. | |
1121 | * | |
1122 | * Note: GTT start, end, size should be initialized before calling this | |
1123 | * function on AGP platform. | |
1124 | */ | |
1125 | static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |
1126 | { | |
1127 | u64 size_bf, size_af; | |
1128 | ||
1129 | if (mc->mc_vram_size > 0xE0000000) { | |
1130 | /* leave room for at least 512M GTT */ | |
1131 | dev_warn(rdev->dev, "limiting VRAM\n"); | |
1132 | mc->real_vram_size = 0xE0000000; | |
1133 | mc->mc_vram_size = 0xE0000000; | |
1134 | } | |
1135 | if (rdev->flags & RADEON_IS_AGP) { | |
1136 | size_bf = mc->gtt_start; | |
1137 | size_af = 0xFFFFFFFF - mc->gtt_end + 1; | |
1138 | if (size_bf > size_af) { | |
1139 | if (mc->mc_vram_size > size_bf) { | |
1140 | dev_warn(rdev->dev, "limiting VRAM\n"); | |
1141 | mc->real_vram_size = size_bf; | |
1142 | mc->mc_vram_size = size_bf; | |
1143 | } | |
1144 | mc->vram_start = mc->gtt_start - mc->mc_vram_size; | |
1145 | } else { | |
1146 | if (mc->mc_vram_size > size_af) { | |
1147 | dev_warn(rdev->dev, "limiting VRAM\n"); | |
1148 | mc->real_vram_size = size_af; | |
1149 | mc->mc_vram_size = size_af; | |
1150 | } | |
1151 | mc->vram_start = mc->gtt_end; | |
1152 | } | |
1153 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | |
1154 | dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", | |
1155 | mc->mc_vram_size >> 20, mc->vram_start, | |
1156 | mc->vram_end, mc->real_vram_size >> 20); | |
1157 | } else { | |
1158 | u64 base = 0; | |
1159 | if (rdev->flags & RADEON_IS_IGP) { | |
1160 | base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; | |
1161 | base <<= 24; | |
1162 | } | |
1163 | radeon_vram_location(rdev, &rdev->mc, base); | |
1164 | rdev->mc.gtt_base_align = 0; | |
1165 | radeon_gtt_location(rdev, mc); | |
1166 | } | |
1167 | } | |
1168 | ||
1169 | int r600_mc_init(struct radeon_device *rdev) | |
1170 | { | |
1171 | u32 tmp; | |
1172 | int chansize, numchan; | |
1173 | ||
1174 | /* Get VRAM informations */ | |
1175 | rdev->mc.vram_is_ddr = true; | |
1176 | tmp = RREG32(RAMCFG); | |
1177 | if (tmp & CHANSIZE_OVERRIDE) { | |
1178 | chansize = 16; | |
1179 | } else if (tmp & CHANSIZE_MASK) { | |
1180 | chansize = 64; | |
1181 | } else { | |
1182 | chansize = 32; | |
1183 | } | |
1184 | tmp = RREG32(CHMAP); | |
1185 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | |
1186 | case 0: | |
1187 | default: | |
1188 | numchan = 1; | |
1189 | break; | |
1190 | case 1: | |
1191 | numchan = 2; | |
1192 | break; | |
1193 | case 2: | |
1194 | numchan = 4; | |
1195 | break; | |
1196 | case 3: | |
1197 | numchan = 8; | |
1198 | break; | |
1199 | } | |
1200 | rdev->mc.vram_width = numchan * chansize; | |
1201 | /* Could aper size report 0 ? */ | |
1202 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | |
1203 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | |
1204 | /* Setup GPU memory space */ | |
1205 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | |
1206 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | |
1207 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | |
1208 | r600_vram_gtt_location(rdev, &rdev->mc); | |
1209 | ||
1210 | if (rdev->flags & RADEON_IS_IGP) { | |
1211 | rs690_pm_info(rdev); | |
1212 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | |
1213 | } | |
1214 | radeon_update_bandwidth_info(rdev); | |
1215 | return 0; | |
1216 | } | |
1217 | ||
1218 | int r600_vram_scratch_init(struct radeon_device *rdev) | |
1219 | { | |
1220 | int r; | |
1221 | ||
1222 | if (rdev->vram_scratch.robj == NULL) { | |
1223 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, | |
1224 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | |
1225 | &rdev->vram_scratch.robj); | |
1226 | if (r) { | |
1227 | return r; | |
1228 | } | |
1229 | } | |
1230 | ||
1231 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | |
1232 | if (unlikely(r != 0)) | |
1233 | return r; | |
1234 | r = radeon_bo_pin(rdev->vram_scratch.robj, | |
1235 | RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr); | |
1236 | if (r) { | |
1237 | radeon_bo_unreserve(rdev->vram_scratch.robj); | |
1238 | return r; | |
1239 | } | |
1240 | r = radeon_bo_kmap(rdev->vram_scratch.robj, | |
1241 | (void **)&rdev->vram_scratch.ptr); | |
1242 | if (r) | |
1243 | radeon_bo_unpin(rdev->vram_scratch.robj); | |
1244 | radeon_bo_unreserve(rdev->vram_scratch.robj); | |
1245 | ||
1246 | return r; | |
1247 | } | |
1248 | ||
1249 | void r600_vram_scratch_fini(struct radeon_device *rdev) | |
1250 | { | |
1251 | int r; | |
1252 | ||
1253 | if (rdev->vram_scratch.robj == NULL) { | |
1254 | return; | |
1255 | } | |
1256 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | |
1257 | if (likely(r == 0)) { | |
1258 | radeon_bo_kunmap(rdev->vram_scratch.robj); | |
1259 | radeon_bo_unpin(rdev->vram_scratch.robj); | |
1260 | radeon_bo_unreserve(rdev->vram_scratch.robj); | |
1261 | } | |
1262 | radeon_bo_unref(&rdev->vram_scratch.robj); | |
1263 | } | |
1264 | ||
1265 | /* We doesn't check that the GPU really needs a reset we simply do the | |
1266 | * reset, it's up to the caller to determine if the GPU needs one. We | |
1267 | * might add an helper function to check that. | |
1268 | */ | |
1269 | int r600_gpu_soft_reset(struct radeon_device *rdev) | |
1270 | { | |
1271 | struct rv515_mc_save save; | |
1272 | u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | | |
1273 | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | | |
1274 | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | | |
1275 | S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) | | |
1276 | S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) | | |
1277 | S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) | | |
1278 | S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) | | |
1279 | S_008010_GUI_ACTIVE(1); | |
1280 | u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) | | |
1281 | S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) | | |
1282 | S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) | | |
1283 | S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) | | |
1284 | S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) | | |
1285 | S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) | | |
1286 | S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | | |
1287 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); | |
1288 | u32 tmp; | |
1289 | ||
1290 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | |
1291 | return 0; | |
1292 | ||
1293 | dev_info(rdev->dev, "GPU softreset \n"); | |
1294 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | |
1295 | RREG32(R_008010_GRBM_STATUS)); | |
1296 | dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", | |
1297 | RREG32(R_008014_GRBM_STATUS2)); | |
1298 | dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", | |
1299 | RREG32(R_000E50_SRBM_STATUS)); | |
1300 | rv515_mc_stop(rdev, &save); | |
1301 | if (r600_mc_wait_for_idle(rdev)) { | |
1302 | dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); | |
1303 | } | |
1304 | /* Disable CP parsing/prefetching */ | |
1305 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | |
1306 | /* Check if any of the rendering block is busy and reset it */ | |
1307 | if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || | |
1308 | (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { | |
1309 | tmp = S_008020_SOFT_RESET_CR(1) | | |
1310 | S_008020_SOFT_RESET_DB(1) | | |
1311 | S_008020_SOFT_RESET_CB(1) | | |
1312 | S_008020_SOFT_RESET_PA(1) | | |
1313 | S_008020_SOFT_RESET_SC(1) | | |
1314 | S_008020_SOFT_RESET_SMX(1) | | |
1315 | S_008020_SOFT_RESET_SPI(1) | | |
1316 | S_008020_SOFT_RESET_SX(1) | | |
1317 | S_008020_SOFT_RESET_SH(1) | | |
1318 | S_008020_SOFT_RESET_TC(1) | | |
1319 | S_008020_SOFT_RESET_TA(1) | | |
1320 | S_008020_SOFT_RESET_VC(1) | | |
1321 | S_008020_SOFT_RESET_VGT(1); | |
1322 | dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); | |
1323 | WREG32(R_008020_GRBM_SOFT_RESET, tmp); | |
1324 | RREG32(R_008020_GRBM_SOFT_RESET); | |
1325 | mdelay(15); | |
1326 | WREG32(R_008020_GRBM_SOFT_RESET, 0); | |
1327 | } | |
1328 | /* Reset CP (we always reset CP) */ | |
1329 | tmp = S_008020_SOFT_RESET_CP(1); | |
1330 | dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); | |
1331 | WREG32(R_008020_GRBM_SOFT_RESET, tmp); | |
1332 | RREG32(R_008020_GRBM_SOFT_RESET); | |
1333 | mdelay(15); | |
1334 | WREG32(R_008020_GRBM_SOFT_RESET, 0); | |
1335 | /* Wait a little for things to settle down */ | |
1336 | mdelay(1); | |
1337 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | |
1338 | RREG32(R_008010_GRBM_STATUS)); | |
1339 | dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", | |
1340 | RREG32(R_008014_GRBM_STATUS2)); | |
1341 | dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", | |
1342 | RREG32(R_000E50_SRBM_STATUS)); | |
1343 | rv515_mc_resume(rdev, &save); | |
1344 | return 0; | |
1345 | } | |
1346 | ||
1347 | bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |
1348 | { | |
1349 | u32 srbm_status; | |
1350 | u32 grbm_status; | |
1351 | u32 grbm_status2; | |
1352 | struct r100_gpu_lockup *lockup; | |
1353 | int r; | |
1354 | ||
1355 | if (rdev->family >= CHIP_RV770) | |
1356 | lockup = &rdev->config.rv770.lockup; | |
1357 | else | |
1358 | lockup = &rdev->config.r600.lockup; | |
1359 | ||
1360 | srbm_status = RREG32(R_000E50_SRBM_STATUS); | |
1361 | grbm_status = RREG32(R_008010_GRBM_STATUS); | |
1362 | grbm_status2 = RREG32(R_008014_GRBM_STATUS2); | |
1363 | if (!G_008010_GUI_ACTIVE(grbm_status)) { | |
1364 | r100_gpu_lockup_update(lockup, ring); | |
1365 | return false; | |
1366 | } | |
1367 | /* force CP activities */ | |
1368 | r = radeon_ring_lock(rdev, ring, 2); | |
1369 | if (!r) { | |
1370 | /* PACKET2 NOP */ | |
1371 | radeon_ring_write(ring, 0x80000000); | |
1372 | radeon_ring_write(ring, 0x80000000); | |
1373 | radeon_ring_unlock_commit(rdev, ring); | |
1374 | } | |
1375 | ring->rptr = RREG32(ring->rptr_reg); | |
1376 | return r100_gpu_cp_is_lockup(rdev, lockup, ring); | |
1377 | } | |
1378 | ||
1379 | int r600_asic_reset(struct radeon_device *rdev) | |
1380 | { | |
1381 | return r600_gpu_soft_reset(rdev); | |
1382 | } | |
1383 | ||
1384 | static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, | |
1385 | u32 num_backends, | |
1386 | u32 backend_disable_mask) | |
1387 | { | |
1388 | u32 backend_map = 0; | |
1389 | u32 enabled_backends_mask; | |
1390 | u32 enabled_backends_count; | |
1391 | u32 cur_pipe; | |
1392 | u32 swizzle_pipe[R6XX_MAX_PIPES]; | |
1393 | u32 cur_backend; | |
1394 | u32 i; | |
1395 | ||
1396 | if (num_tile_pipes > R6XX_MAX_PIPES) | |
1397 | num_tile_pipes = R6XX_MAX_PIPES; | |
1398 | if (num_tile_pipes < 1) | |
1399 | num_tile_pipes = 1; | |
1400 | if (num_backends > R6XX_MAX_BACKENDS) | |
1401 | num_backends = R6XX_MAX_BACKENDS; | |
1402 | if (num_backends < 1) | |
1403 | num_backends = 1; | |
1404 | ||
1405 | enabled_backends_mask = 0; | |
1406 | enabled_backends_count = 0; | |
1407 | for (i = 0; i < R6XX_MAX_BACKENDS; ++i) { | |
1408 | if (((backend_disable_mask >> i) & 1) == 0) { | |
1409 | enabled_backends_mask |= (1 << i); | |
1410 | ++enabled_backends_count; | |
1411 | } | |
1412 | if (enabled_backends_count == num_backends) | |
1413 | break; | |
1414 | } | |
1415 | ||
1416 | if (enabled_backends_count == 0) { | |
1417 | enabled_backends_mask = 1; | |
1418 | enabled_backends_count = 1; | |
1419 | } | |
1420 | ||
1421 | if (enabled_backends_count != num_backends) | |
1422 | num_backends = enabled_backends_count; | |
1423 | ||
1424 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES); | |
1425 | switch (num_tile_pipes) { | |
1426 | case 1: | |
1427 | swizzle_pipe[0] = 0; | |
1428 | break; | |
1429 | case 2: | |
1430 | swizzle_pipe[0] = 0; | |
1431 | swizzle_pipe[1] = 1; | |
1432 | break; | |
1433 | case 3: | |
1434 | swizzle_pipe[0] = 0; | |
1435 | swizzle_pipe[1] = 1; | |
1436 | swizzle_pipe[2] = 2; | |
1437 | break; | |
1438 | case 4: | |
1439 | swizzle_pipe[0] = 0; | |
1440 | swizzle_pipe[1] = 1; | |
1441 | swizzle_pipe[2] = 2; | |
1442 | swizzle_pipe[3] = 3; | |
1443 | break; | |
1444 | case 5: | |
1445 | swizzle_pipe[0] = 0; | |
1446 | swizzle_pipe[1] = 1; | |
1447 | swizzle_pipe[2] = 2; | |
1448 | swizzle_pipe[3] = 3; | |
1449 | swizzle_pipe[4] = 4; | |
1450 | break; | |
1451 | case 6: | |
1452 | swizzle_pipe[0] = 0; | |
1453 | swizzle_pipe[1] = 2; | |
1454 | swizzle_pipe[2] = 4; | |
1455 | swizzle_pipe[3] = 5; | |
1456 | swizzle_pipe[4] = 1; | |
1457 | swizzle_pipe[5] = 3; | |
1458 | break; | |
1459 | case 7: | |
1460 | swizzle_pipe[0] = 0; | |
1461 | swizzle_pipe[1] = 2; | |
1462 | swizzle_pipe[2] = 4; | |
1463 | swizzle_pipe[3] = 6; | |
1464 | swizzle_pipe[4] = 1; | |
1465 | swizzle_pipe[5] = 3; | |
1466 | swizzle_pipe[6] = 5; | |
1467 | break; | |
1468 | case 8: | |
1469 | swizzle_pipe[0] = 0; | |
1470 | swizzle_pipe[1] = 2; | |
1471 | swizzle_pipe[2] = 4; | |
1472 | swizzle_pipe[3] = 6; | |
1473 | swizzle_pipe[4] = 1; | |
1474 | swizzle_pipe[5] = 3; | |
1475 | swizzle_pipe[6] = 5; | |
1476 | swizzle_pipe[7] = 7; | |
1477 | break; | |
1478 | } | |
1479 | ||
1480 | cur_backend = 0; | |
1481 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { | |
1482 | while (((1 << cur_backend) & enabled_backends_mask) == 0) | |
1483 | cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; | |
1484 | ||
1485 | backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); | |
1486 | ||
1487 | cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; | |
1488 | } | |
1489 | ||
1490 | return backend_map; | |
1491 | } | |
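/* Worked example (illustrative): with num_tile_pipes = 4, num_backends = 2
 * and backend_disable_mask = 0, swizzle_pipe is {0, 1, 2, 3} and the enabled
 * backend mask is 0x3, so the loop above assigns backends 0, 1, 0, 1 to
 * pipes 0..3 and returns 0x44 (two bits per pipe: 01 00 01 00).
 */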
1492 | ||
1493 | int r600_count_pipe_bits(uint32_t val) | |
1494 | { | |
1495 | int i, ret = 0; | |
1496 | ||
1497 | for (i = 0; i < 32; i++) { | |
1498 | ret += val & 1; | |
1499 | val >>= 1; | |
1500 | } | |
1501 | return ret; | |
1502 | } | |
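/* Note: this is a plain population count, e.g. r600_count_pipe_bits(0xf0)
 * returns 4. The kernel's generic hweight32() computes the same thing and
 * could probably be used instead (just an observation, not a change made
 * here).
 */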
1503 | ||
1504 | void r600_gpu_init(struct radeon_device *rdev) | |
1505 | { | |
1506 | u32 tiling_config; | |
1507 | u32 ramcfg; | |
1508 | u32 backend_map; | |
1509 | u32 cc_rb_backend_disable; | |
1510 | u32 cc_gc_shader_pipe_config; | |
1511 | u32 tmp; | |
1512 | int i, j; | |
1513 | u32 sq_config; | |
1514 | u32 sq_gpr_resource_mgmt_1 = 0; | |
1515 | u32 sq_gpr_resource_mgmt_2 = 0; | |
1516 | u32 sq_thread_resource_mgmt = 0; | |
1517 | u32 sq_stack_resource_mgmt_1 = 0; | |
1518 | u32 sq_stack_resource_mgmt_2 = 0; | |
1519 | ||
1520 | /* FIXME: implement */ | |
1521 | switch (rdev->family) { | |
1522 | case CHIP_R600: | |
1523 | rdev->config.r600.max_pipes = 4; | |
1524 | rdev->config.r600.max_tile_pipes = 8; | |
1525 | rdev->config.r600.max_simds = 4; | |
1526 | rdev->config.r600.max_backends = 4; | |
1527 | rdev->config.r600.max_gprs = 256; | |
1528 | rdev->config.r600.max_threads = 192; | |
1529 | rdev->config.r600.max_stack_entries = 256; | |
1530 | rdev->config.r600.max_hw_contexts = 8; | |
1531 | rdev->config.r600.max_gs_threads = 16; | |
1532 | rdev->config.r600.sx_max_export_size = 128; | |
1533 | rdev->config.r600.sx_max_export_pos_size = 16; | |
1534 | rdev->config.r600.sx_max_export_smx_size = 128; | |
1535 | rdev->config.r600.sq_num_cf_insts = 2; | |
1536 | break; | |
1537 | case CHIP_RV630: | |
1538 | case CHIP_RV635: | |
1539 | rdev->config.r600.max_pipes = 2; | |
1540 | rdev->config.r600.max_tile_pipes = 2; | |
1541 | rdev->config.r600.max_simds = 3; | |
1542 | rdev->config.r600.max_backends = 1; | |
1543 | rdev->config.r600.max_gprs = 128; | |
1544 | rdev->config.r600.max_threads = 192; | |
1545 | rdev->config.r600.max_stack_entries = 128; | |
1546 | rdev->config.r600.max_hw_contexts = 8; | |
1547 | rdev->config.r600.max_gs_threads = 4; | |
1548 | rdev->config.r600.sx_max_export_size = 128; | |
1549 | rdev->config.r600.sx_max_export_pos_size = 16; | |
1550 | rdev->config.r600.sx_max_export_smx_size = 128; | |
1551 | rdev->config.r600.sq_num_cf_insts = 2; | |
1552 | break; | |
1553 | case CHIP_RV610: | |
1554 | case CHIP_RV620: | |
1555 | case CHIP_RS780: | |
1556 | case CHIP_RS880: | |
1557 | rdev->config.r600.max_pipes = 1; | |
1558 | rdev->config.r600.max_tile_pipes = 1; | |
1559 | rdev->config.r600.max_simds = 2; | |
1560 | rdev->config.r600.max_backends = 1; | |
1561 | rdev->config.r600.max_gprs = 128; | |
1562 | rdev->config.r600.max_threads = 192; | |
1563 | rdev->config.r600.max_stack_entries = 128; | |
1564 | rdev->config.r600.max_hw_contexts = 4; | |
1565 | rdev->config.r600.max_gs_threads = 4; | |
1566 | rdev->config.r600.sx_max_export_size = 128; | |
1567 | rdev->config.r600.sx_max_export_pos_size = 16; | |
1568 | rdev->config.r600.sx_max_export_smx_size = 128; | |
1569 | rdev->config.r600.sq_num_cf_insts = 1; | |
1570 | break; | |
1571 | case CHIP_RV670: | |
1572 | rdev->config.r600.max_pipes = 4; | |
1573 | rdev->config.r600.max_tile_pipes = 4; | |
1574 | rdev->config.r600.max_simds = 4; | |
1575 | rdev->config.r600.max_backends = 4; | |
1576 | rdev->config.r600.max_gprs = 192; | |
1577 | rdev->config.r600.max_threads = 192; | |
1578 | rdev->config.r600.max_stack_entries = 256; | |
1579 | rdev->config.r600.max_hw_contexts = 8; | |
1580 | rdev->config.r600.max_gs_threads = 16; | |
1581 | rdev->config.r600.sx_max_export_size = 128; | |
1582 | rdev->config.r600.sx_max_export_pos_size = 16; | |
1583 | rdev->config.r600.sx_max_export_smx_size = 128; | |
1584 | rdev->config.r600.sq_num_cf_insts = 2; | |
1585 | break; | |
1586 | default: | |
1587 | break; | |
1588 | } | |
1589 | ||
1590 | /* Initialize HDP */ | |
1591 | for (i = 0, j = 0; i < 32; i++, j += 0x18) { | |
1592 | WREG32((0x2c14 + j), 0x00000000); | |
1593 | WREG32((0x2c18 + j), 0x00000000); | |
1594 | WREG32((0x2c1c + j), 0x00000000); | |
1595 | WREG32((0x2c20 + j), 0x00000000); | |
1596 | WREG32((0x2c24 + j), 0x00000000); | |
1597 | } | |
1598 | ||
1599 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | |
1600 | ||
1601 | /* Setup tiling */ | |
1602 | tiling_config = 0; | |
1603 | ramcfg = RREG32(RAMCFG); | |
1604 | switch (rdev->config.r600.max_tile_pipes) { | |
1605 | case 1: | |
1606 | tiling_config |= PIPE_TILING(0); | |
1607 | break; | |
1608 | case 2: | |
1609 | tiling_config |= PIPE_TILING(1); | |
1610 | break; | |
1611 | case 4: | |
1612 | tiling_config |= PIPE_TILING(2); | |
1613 | break; | |
1614 | case 8: | |
1615 | tiling_config |= PIPE_TILING(3); | |
1616 | break; | |
1617 | default: | |
1618 | break; | |
1619 | } | |
1620 | rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; | |
1621 | rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); | |
1622 | tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); | |
1623 | tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); | |
1624 | if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) | |
1625 | rdev->config.r600.tiling_group_size = 512; | |
1626 | else | |
1627 | rdev->config.r600.tiling_group_size = 256; | |
1628 | tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; | |
1629 | if (tmp > 3) { | |
1630 | tiling_config |= ROW_TILING(3); | |
1631 | tiling_config |= SAMPLE_SPLIT(3); | |
1632 | } else { | |
1633 | tiling_config |= ROW_TILING(tmp); | |
1634 | tiling_config |= SAMPLE_SPLIT(tmp); | |
1635 | } | |
1636 | tiling_config |= BANK_SWAPS(1); | |
1637 | ||
1638 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; | |
1639 | cc_rb_backend_disable |= | |
1640 | BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); | |
1641 | ||
1642 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; | |
1643 | cc_gc_shader_pipe_config |= | |
1644 | INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); | |
1645 | cc_gc_shader_pipe_config |= | |
1646 | INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); | |
1647 | ||
1648 | backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, | |
1649 | (R6XX_MAX_BACKENDS - | |
1650 | r600_count_pipe_bits((cc_rb_backend_disable & | |
1651 | R6XX_MAX_BACKENDS_MASK) >> 16)), | |
1652 | (cc_rb_backend_disable >> 16)); | |
1653 | rdev->config.r600.tile_config = tiling_config; | |
1654 | rdev->config.r600.backend_map = backend_map; | |
1655 | tiling_config |= BACKEND_MAP(backend_map); | |
1656 | WREG32(GB_TILING_CONFIG, tiling_config); | |
1657 | WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); | |
1658 | WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); | |
1659 | ||
1660 | /* Setup pipes */ | |
1661 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | |
1662 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | |
1663 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | |
1664 | ||
1665 | tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); | |
1666 | WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); | |
1667 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); | |
1668 | ||
1669 | /* Setup some CP states */ | |
1670 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b))); | |
1671 | WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40))); | |
1672 | ||
1673 | WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT | | |
1674 | SYNC_WALKER | SYNC_ALIGNER)); | |
1675 | /* Setup various GPU states */ | |
1676 | if (rdev->family == CHIP_RV670) | |
1677 | WREG32(ARB_GDEC_RD_CNTL, 0x00000021); | |
1678 | ||
1679 | tmp = RREG32(SX_DEBUG_1); | |
1680 | tmp |= SMX_EVENT_RELEASE; | |
1681 | if ((rdev->family > CHIP_R600)) | |
1682 | tmp |= ENABLE_NEW_SMX_ADDRESS; | |
1683 | WREG32(SX_DEBUG_1, tmp); | |
1684 | ||
1685 | if (((rdev->family) == CHIP_R600) || | |
1686 | ((rdev->family) == CHIP_RV630) || | |
1687 | ((rdev->family) == CHIP_RV610) || | |
1688 | ((rdev->family) == CHIP_RV620) || | |
1689 | ((rdev->family) == CHIP_RS780) || | |
1690 | ((rdev->family) == CHIP_RS880)) { | |
1691 | WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); | |
1692 | } else { | |
1693 | WREG32(DB_DEBUG, 0); | |
1694 | } | |
1695 | WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) | | |
1696 | DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4))); | |
1697 | ||
1698 | WREG32(PA_SC_MULTI_CHIP_CNTL, 0); | |
1699 | WREG32(VGT_NUM_INSTANCES, 0); | |
1700 | ||
1701 | WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); | |
1702 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0)); | |
1703 | ||
1704 | tmp = RREG32(SQ_MS_FIFO_SIZES); | |
1705 | if (((rdev->family) == CHIP_RV610) || | |
1706 | ((rdev->family) == CHIP_RV620) || | |
1707 | ((rdev->family) == CHIP_RS780) || | |
1708 | ((rdev->family) == CHIP_RS880)) { | |
1709 | tmp = (CACHE_FIFO_SIZE(0xa) | | |
1710 | FETCH_FIFO_HIWATER(0xa) | | |
1711 | DONE_FIFO_HIWATER(0xe0) | | |
1712 | ALU_UPDATE_FIFO_HIWATER(0x8)); | |
1713 | } else if (((rdev->family) == CHIP_R600) || | |
1714 | ((rdev->family) == CHIP_RV630)) { | |
1715 | tmp &= ~DONE_FIFO_HIWATER(0xff); | |
1716 | tmp |= DONE_FIFO_HIWATER(0x4); | |
1717 | } | |
1718 | WREG32(SQ_MS_FIFO_SIZES, tmp); | |
1719 | ||
1720 | /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT | |
1721 | * should be adjusted as needed by the 2D/3D drivers. This just sets default values | |
1722 | */ | |
1723 | sq_config = RREG32(SQ_CONFIG); | |
1724 | sq_config &= ~(PS_PRIO(3) | | |
1725 | VS_PRIO(3) | | |
1726 | GS_PRIO(3) | | |
1727 | ES_PRIO(3)); | |
1728 | sq_config |= (DX9_CONSTS | | |
1729 | VC_ENABLE | | |
1730 | PS_PRIO(0) | | |
1731 | VS_PRIO(1) | | |
1732 | GS_PRIO(2) | | |
1733 | ES_PRIO(3)); | |
1734 | ||
1735 | if ((rdev->family) == CHIP_R600) { | |
1736 | sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) | | |
1737 | NUM_VS_GPRS(124) | | |
1738 | NUM_CLAUSE_TEMP_GPRS(4)); | |
1739 | sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) | | |
1740 | NUM_ES_GPRS(0)); | |
1741 | sq_thread_resource_mgmt = (NUM_PS_THREADS(136) | | |
1742 | NUM_VS_THREADS(48) | | |
1743 | NUM_GS_THREADS(4) | | |
1744 | NUM_ES_THREADS(4)); | |
1745 | sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) | | |
1746 | NUM_VS_STACK_ENTRIES(128)); | |
1747 | sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) | | |
1748 | NUM_ES_STACK_ENTRIES(0)); | |
1749 | } else if (((rdev->family) == CHIP_RV610) || | |
1750 | ((rdev->family) == CHIP_RV620) || | |
1751 | ((rdev->family) == CHIP_RS780) || | |
1752 | ((rdev->family) == CHIP_RS880)) { | |
1753 | /* no vertex cache */ | |
1754 | sq_config &= ~VC_ENABLE; | |
1755 | ||
1756 | sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | | |
1757 | NUM_VS_GPRS(44) | | |
1758 | NUM_CLAUSE_TEMP_GPRS(2)); | |
1759 | sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | | |
1760 | NUM_ES_GPRS(17)); | |
1761 | sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | | |
1762 | NUM_VS_THREADS(78) | | |
1763 | NUM_GS_THREADS(4) | | |
1764 | NUM_ES_THREADS(31)); | |
1765 | sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | | |
1766 | NUM_VS_STACK_ENTRIES(40)); | |
1767 | sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | | |
1768 | NUM_ES_STACK_ENTRIES(16)); | |
1769 | } else if (((rdev->family) == CHIP_RV630) || | |
1770 | ((rdev->family) == CHIP_RV635)) { | |
1771 | sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | | |
1772 | NUM_VS_GPRS(44) | | |
1773 | NUM_CLAUSE_TEMP_GPRS(2)); | |
1774 | sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) | | |
1775 | NUM_ES_GPRS(18)); | |
1776 | sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | | |
1777 | NUM_VS_THREADS(78) | | |
1778 | NUM_GS_THREADS(4) | | |
1779 | NUM_ES_THREADS(31)); | |
1780 | sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | | |
1781 | NUM_VS_STACK_ENTRIES(40)); | |
1782 | sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | | |
1783 | NUM_ES_STACK_ENTRIES(16)); | |
1784 | } else if ((rdev->family) == CHIP_RV670) { | |
1785 | sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | | |
1786 | NUM_VS_GPRS(44) | | |
1787 | NUM_CLAUSE_TEMP_GPRS(2)); | |
1788 | sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | | |
1789 | NUM_ES_GPRS(17)); | |
1790 | sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | | |
1791 | NUM_VS_THREADS(78) | | |
1792 | NUM_GS_THREADS(4) | | |
1793 | NUM_ES_THREADS(31)); | |
1794 | sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) | | |
1795 | NUM_VS_STACK_ENTRIES(64)); | |
1796 | sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) | | |
1797 | NUM_ES_STACK_ENTRIES(64)); | |
1798 | } | |
1799 | ||
1800 | WREG32(SQ_CONFIG, sq_config); | |
1801 | WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); | |
1802 | WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); | |
1803 | WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); | |
1804 | WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); | |
1805 | WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); | |
1806 | ||
1807 | if (((rdev->family) == CHIP_RV610) || | |
1808 | ((rdev->family) == CHIP_RV620) || | |
1809 | ((rdev->family) == CHIP_RS780) || | |
1810 | ((rdev->family) == CHIP_RS880)) { | |
1811 | WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); | |
1812 | } else { | |
1813 | WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); | |
1814 | } | |
1815 | ||
1816 | /* More default values. 2D/3D driver should adjust as needed */ | |
1817 | WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) | | |
1818 | S1_X(0x4) | S1_Y(0xc))); | |
1819 | WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) | | |
1820 | S1_X(0x2) | S1_Y(0x2) | | |
1821 | S2_X(0xa) | S2_Y(0x6) | | |
1822 | S3_X(0x6) | S3_Y(0xa))); | |
1823 | WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) | | |
1824 | S1_X(0x4) | S1_Y(0xc) | | |
1825 | S2_X(0x1) | S2_Y(0x6) | | |
1826 | S3_X(0xa) | S3_Y(0xe))); | |
1827 | WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) | | |
1828 | S5_X(0x0) | S5_Y(0x0) | | |
1829 | S6_X(0xb) | S6_Y(0x4) | | |
1830 | S7_X(0x7) | S7_Y(0x8))); | |
1831 | ||
1832 | WREG32(VGT_STRMOUT_EN, 0); | |
1833 | tmp = rdev->config.r600.max_pipes * 16; | |
1834 | switch (rdev->family) { | |
1835 | case CHIP_RV610: | |
1836 | case CHIP_RV620: | |
1837 | case CHIP_RS780: | |
1838 | case CHIP_RS880: | |
1839 | tmp += 32; | |
1840 | break; | |
1841 | case CHIP_RV670: | |
1842 | tmp += 128; | |
1843 | break; | |
1844 | default: | |
1845 | break; | |
1846 | } | |
1847 | if (tmp > 256) { | |
1848 | tmp = 256; | |
1849 | } | |
1850 | WREG32(VGT_ES_PER_GS, 128); | |
1851 | WREG32(VGT_GS_PER_ES, tmp); | |
1852 | WREG32(VGT_GS_PER_VS, 2); | |
1853 | WREG32(VGT_GS_VERTEX_REUSE, 16); | |
1854 | ||
1855 | /* more default values. 2D/3D driver should adjust as needed */ | |
1856 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | |
1857 | WREG32(VGT_STRMOUT_EN, 0); | |
1858 | WREG32(SX_MISC, 0); | |
1859 | WREG32(PA_SC_MODE_CNTL, 0); | |
1860 | WREG32(PA_SC_AA_CONFIG, 0); | |
1861 | WREG32(PA_SC_LINE_STIPPLE, 0); | |
1862 | WREG32(SPI_INPUT_Z, 0); | |
1863 | WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2)); | |
1864 | WREG32(CB_COLOR7_FRAG, 0); | |
1865 | ||
1866 | /* Clear render buffer base addresses */ | |
1867 | WREG32(CB_COLOR0_BASE, 0); | |
1868 | WREG32(CB_COLOR1_BASE, 0); | |
1869 | WREG32(CB_COLOR2_BASE, 0); | |
1870 | WREG32(CB_COLOR3_BASE, 0); | |
1871 | WREG32(CB_COLOR4_BASE, 0); | |
1872 | WREG32(CB_COLOR5_BASE, 0); | |
1873 | WREG32(CB_COLOR6_BASE, 0); | |
1874 | WREG32(CB_COLOR7_BASE, 0); | |
1875 | WREG32(CB_COLOR7_FRAG, 0); | |
1876 | ||
1877 | switch (rdev->family) { | |
1878 | case CHIP_RV610: | |
1879 | case CHIP_RV620: | |
1880 | case CHIP_RS780: | |
1881 | case CHIP_RS880: | |
1882 | tmp = TC_L2_SIZE(8); | |
1883 | break; | |
1884 | case CHIP_RV630: | |
1885 | case CHIP_RV635: | |
1886 | tmp = TC_L2_SIZE(4); | |
1887 | break; | |
1888 | case CHIP_R600: | |
1889 | tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT; | |
1890 | break; | |
1891 | default: | |
1892 | tmp = TC_L2_SIZE(0); | |
1893 | break; | |
1894 | } | |
1895 | WREG32(TC_CNTL, tmp); | |
1896 | ||
1897 | tmp = RREG32(HDP_HOST_PATH_CNTL); | |
1898 | WREG32(HDP_HOST_PATH_CNTL, tmp); | |
1899 | ||
1900 | tmp = RREG32(ARB_POP); | |
1901 | tmp |= ENABLE_TC128; | |
1902 | WREG32(ARB_POP, tmp); | |
1903 | ||
1904 | WREG32(PA_SC_MULTI_CHIP_CNTL, 0); | |
1905 | WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | | |
1906 | NUM_CLIP_SEQ(3))); | |
1907 | WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); | |
1908 | } | |
1909 | ||
1910 | ||
1911 | /* | |
1912 | * Indirect register accessors | |
1913 | */ | |
1914 | u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) | |
1915 | { | |
1916 | u32 r; | |
1917 | ||
1918 | WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); | |
1919 | (void)RREG32(PCIE_PORT_INDEX); | |
1920 | r = RREG32(PCIE_PORT_DATA); | |
1921 | return r; | |
1922 | } | |
1923 | ||
1924 | void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |
1925 | { | |
1926 | WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); | |
1927 | (void)RREG32(PCIE_PORT_INDEX); | |
1928 | WREG32(PCIE_PORT_DATA, (v)); | |
1929 | (void)RREG32(PCIE_PORT_DATA); | |
1930 | } | |
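/* Usage sketch (illustrative only): a read-modify-write of an indirect PCIE
 * port register through the two accessors above. The function name and the
 * clr/set parameters are hypothetical.
 */
static void r600_pciep_rmw_sketch(struct radeon_device *rdev, u32 reg,
				  u32 clr, u32 set)
{
	u32 tmp = r600_pciep_rreg(rdev, reg);

	tmp &= ~clr;
	tmp |= set;
	r600_pciep_wreg(rdev, reg, tmp);
}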
1931 | ||
1932 | /* | |
1933 | * CP & Ring | |
1934 | */ | |
1935 | void r600_cp_stop(struct radeon_device *rdev) | |
1936 | { | |
1937 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | |
1938 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | |
1939 | WREG32(SCRATCH_UMSK, 0); | |
1940 | } | |
1941 | ||
1942 | int r600_init_microcode(struct radeon_device *rdev) | |
1943 | { | |
1944 | struct platform_device *pdev; | |
1945 | const char *chip_name; | |
1946 | const char *rlc_chip_name; | |
1947 | size_t pfp_req_size, me_req_size, rlc_req_size; | |
1948 | char fw_name[30]; | |
1949 | int err; | |
1950 | ||
1951 | DRM_DEBUG("\n"); | |
1952 | ||
1953 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | |
1954 | err = IS_ERR(pdev); | |
1955 | if (err) { | |
1956 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | |
1957 | return -EINVAL; | |
1958 | } | |
1959 | ||
1960 | switch (rdev->family) { | |
1961 | case CHIP_R600: | |
1962 | chip_name = "R600"; | |
1963 | rlc_chip_name = "R600"; | |
1964 | break; | |
1965 | case CHIP_RV610: | |
1966 | chip_name = "RV610"; | |
1967 | rlc_chip_name = "R600"; | |
1968 | break; | |
1969 | case CHIP_RV630: | |
1970 | chip_name = "RV630"; | |
1971 | rlc_chip_name = "R600"; | |
1972 | break; | |
1973 | case CHIP_RV620: | |
1974 | chip_name = "RV620"; | |
1975 | rlc_chip_name = "R600"; | |
1976 | break; | |
1977 | case CHIP_RV635: | |
1978 | chip_name = "RV635"; | |
1979 | rlc_chip_name = "R600"; | |
1980 | break; | |
1981 | case CHIP_RV670: | |
1982 | chip_name = "RV670"; | |
1983 | rlc_chip_name = "R600"; | |
1984 | break; | |
1985 | case CHIP_RS780: | |
1986 | case CHIP_RS880: | |
1987 | chip_name = "RS780"; | |
1988 | rlc_chip_name = "R600"; | |
1989 | break; | |
1990 | case CHIP_RV770: | |
1991 | chip_name = "RV770"; | |
1992 | rlc_chip_name = "R700"; | |
1993 | break; | |
1994 | case CHIP_RV730: | |
1995 | case CHIP_RV740: | |
1996 | chip_name = "RV730"; | |
1997 | rlc_chip_name = "R700"; | |
1998 | break; | |
1999 | case CHIP_RV710: | |
2000 | chip_name = "RV710"; | |
2001 | rlc_chip_name = "R700"; | |
2002 | break; | |
2003 | case CHIP_CEDAR: | |
2004 | chip_name = "CEDAR"; | |
2005 | rlc_chip_name = "CEDAR"; | |
2006 | break; | |
2007 | case CHIP_REDWOOD: | |
2008 | chip_name = "REDWOOD"; | |
2009 | rlc_chip_name = "REDWOOD"; | |
2010 | break; | |
2011 | case CHIP_JUNIPER: | |
2012 | chip_name = "JUNIPER"; | |
2013 | rlc_chip_name = "JUNIPER"; | |
2014 | break; | |
2015 | case CHIP_CYPRESS: | |
2016 | case CHIP_HEMLOCK: | |
2017 | chip_name = "CYPRESS"; | |
2018 | rlc_chip_name = "CYPRESS"; | |
2019 | break; | |
2020 | case CHIP_PALM: | |
2021 | chip_name = "PALM"; | |
2022 | rlc_chip_name = "SUMO"; | |
2023 | break; | |
2024 | case CHIP_SUMO: | |
2025 | chip_name = "SUMO"; | |
2026 | rlc_chip_name = "SUMO"; | |
2027 | break; | |
2028 | case CHIP_SUMO2: | |
2029 | chip_name = "SUMO2"; | |
2030 | rlc_chip_name = "SUMO"; | |
2031 | break; | |
2032 | default: BUG(); | |
2033 | } | |
2034 | ||
2035 | if (rdev->family >= CHIP_CEDAR) { | |
2036 | pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; | |
2037 | me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; | |
2038 | rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; | |
2039 | } else if (rdev->family >= CHIP_RV770) { | |
2040 | pfp_req_size = R700_PFP_UCODE_SIZE * 4; | |
2041 | me_req_size = R700_PM4_UCODE_SIZE * 4; | |
2042 | rlc_req_size = R700_RLC_UCODE_SIZE * 4; | |
2043 | } else { | |
2044 | pfp_req_size = PFP_UCODE_SIZE * 4; | |
2045 | me_req_size = PM4_UCODE_SIZE * 12; | |
2046 | rlc_req_size = RLC_UCODE_SIZE * 4; | |
2047 | } | |
2048 | ||
2049 | DRM_INFO("Loading %s Microcode\n", chip_name); | |
2050 | ||
2051 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | |
2052 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | |
2053 | if (err) | |
2054 | goto out; | |
2055 | if (rdev->pfp_fw->size != pfp_req_size) { | |
2056 | printk(KERN_ERR | |
2057 | "r600_cp: Bogus length %zu in firmware \"%s\"\n", | |
2058 | rdev->pfp_fw->size, fw_name); | |
2059 | err = -EINVAL; | |
2060 | goto out; | |
2061 | } | |
2062 | ||
2063 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | |
2064 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | |
2065 | if (err) | |
2066 | goto out; | |
2067 | if (rdev->me_fw->size != me_req_size) { | |
2068 | printk(KERN_ERR | |
2069 | "r600_cp: Bogus length %zu in firmware \"%s\"\n", | |
2070 | rdev->me_fw->size, fw_name); | |
2071 | err = -EINVAL; | |
2072 | } | |
2073 | ||
2074 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | |
2075 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | |
2076 | if (err) | |
2077 | goto out; | |
2078 | if (rdev->rlc_fw->size != rlc_req_size) { | |
2079 | printk(KERN_ERR | |
2080 | "r600_rlc: Bogus length %zu in firmware \"%s\"\n", | |
2081 | rdev->rlc_fw->size, fw_name); | |
2082 | err = -EINVAL; | |
2083 | } | |
2084 | ||
2085 | out: | |
2086 | platform_device_unregister(pdev); | |
2087 | ||
2088 | if (err) { | |
2089 | if (err != -EINVAL) | |
2090 | printk(KERN_ERR | |
2091 | "r600_cp: Failed to load firmware \"%s\"\n", | |
2092 | fw_name); | |
2093 | release_firmware(rdev->pfp_fw); | |
2094 | rdev->pfp_fw = NULL; | |
2095 | release_firmware(rdev->me_fw); | |
2096 | rdev->me_fw = NULL; | |
2097 | release_firmware(rdev->rlc_fw); | |
2098 | rdev->rlc_fw = NULL; | |
2099 | } | |
2100 | return err; | |
2101 | } | |
2102 | ||
2103 | static int r600_cp_load_microcode(struct radeon_device *rdev) | |
2104 | { | |
2105 | const __be32 *fw_data; | |
2106 | int i; | |
2107 | ||
2108 | if (!rdev->me_fw || !rdev->pfp_fw) | |
2109 | return -EINVAL; | |
2110 | ||
2111 | r600_cp_stop(rdev); | |
2112 | ||
2113 | WREG32(CP_RB_CNTL, | |
2114 | #ifdef __BIG_ENDIAN | |
2115 | BUF_SWAP_32BIT | | |
2116 | #endif | |
2117 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | |
2118 | ||
2119 | /* Reset cp */ | |
2120 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); | |
2121 | RREG32(GRBM_SOFT_RESET); | |
2122 | mdelay(15); | |
2123 | WREG32(GRBM_SOFT_RESET, 0); | |
2124 | ||
2125 | WREG32(CP_ME_RAM_WADDR, 0); | |
2126 | ||
2127 | fw_data = (const __be32 *)rdev->me_fw->data; | |
2128 | WREG32(CP_ME_RAM_WADDR, 0); | |
2129 | for (i = 0; i < PM4_UCODE_SIZE * 3; i++) | |
2130 | WREG32(CP_ME_RAM_DATA, | |
2131 | be32_to_cpup(fw_data++)); | |
2132 | ||
2133 | fw_data = (const __be32 *)rdev->pfp_fw->data; | |
2134 | WREG32(CP_PFP_UCODE_ADDR, 0); | |
2135 | for (i = 0; i < PFP_UCODE_SIZE; i++) | |
2136 | WREG32(CP_PFP_UCODE_DATA, | |
2137 | be32_to_cpup(fw_data++)); | |
2138 | ||
2139 | WREG32(CP_PFP_UCODE_ADDR, 0); | |
2140 | WREG32(CP_ME_RAM_WADDR, 0); | |
2141 | WREG32(CP_ME_RAM_RADDR, 0); | |
2142 | return 0; | |
2143 | } | |
2144 | ||
2145 | int r600_cp_start(struct radeon_device *rdev) | |
2146 | { | |
2147 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | |
2148 | int r; | |
2149 | uint32_t cp_me; | |
2150 | ||
2151 | r = radeon_ring_lock(rdev, ring, 7); | |
2152 | if (r) { | |
2153 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | |
2154 | return r; | |
2155 | } | |
2156 | radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); | |
2157 | radeon_ring_write(ring, 0x1); | |
2158 | if (rdev->family >= CHIP_RV770) { | |
2159 | radeon_ring_write(ring, 0x0); | |
2160 | radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1); | |
2161 | } else { | |
2162 | radeon_ring_write(ring, 0x3); | |
2163 | radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1); | |
2164 | } | |
2165 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | |
2166 | radeon_ring_write(ring, 0); | |
2167 | radeon_ring_write(ring, 0); | |
2168 | radeon_ring_unlock_commit(rdev, ring); | |
2169 | ||
2170 | cp_me = 0xff; | |
2171 | WREG32(R_0086D8_CP_ME_CNTL, cp_me); | |
2172 | return 0; | |
2173 | } | |
2174 | ||
2175 | int r600_cp_resume(struct radeon_device *rdev) | |
2176 | { | |
2177 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | |
2178 | u32 tmp; | |
2179 | u32 rb_bufsz; | |
2180 | int r; | |
2181 | ||
2182 | /* Reset cp */ | |
2183 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); | |
2184 | RREG32(GRBM_SOFT_RESET); | |
2185 | mdelay(15); | |
2186 | WREG32(GRBM_SOFT_RESET, 0); | |
2187 | ||
2188 | /* Set ring buffer size */ | |
2189 | rb_bufsz = drm_order(ring->ring_size / 8); | |
2190 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | |
2191 | #ifdef __BIG_ENDIAN | |
2192 | tmp |= BUF_SWAP_32BIT; | |
2193 | #endif | |
2194 | WREG32(CP_RB_CNTL, tmp); | |
2195 | WREG32(CP_SEM_WAIT_TIMER, 0x0); | |
2196 | ||
2197 | /* Set the write pointer delay */ | |
2198 | WREG32(CP_RB_WPTR_DELAY, 0); | |
2199 | ||
2200 | /* Initialize the ring buffer's read and write pointers */ | |
2201 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | |
2202 | WREG32(CP_RB_RPTR_WR, 0); | |
2203 | ring->wptr = 0; | |
2204 | WREG32(CP_RB_WPTR, ring->wptr); | |
2205 | ||
2206 | /* set the wb address whether it's enabled or not */ | |
2207 | WREG32(CP_RB_RPTR_ADDR, | |
2208 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | |
2209 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | |
2210 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | |
2211 | ||
2212 | if (rdev->wb.enabled) | |
2213 | WREG32(SCRATCH_UMSK, 0xff); | |
2214 | else { | |
2215 | tmp |= RB_NO_UPDATE; | |
2216 | WREG32(SCRATCH_UMSK, 0); | |
2217 | } | |
2218 | ||
2219 | mdelay(1); | |
2220 | WREG32(CP_RB_CNTL, tmp); | |
2221 | ||
2222 | WREG32(CP_RB_BASE, ring->gpu_addr >> 8); | |
2223 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); | |
2224 | ||
2225 | ring->rptr = RREG32(CP_RB_RPTR); | |
2226 | ||
2227 | r600_cp_start(rdev); | |
2228 | ring->ready = true; | |
2229 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); | |
2230 | if (r) { | |
2231 | ring->ready = false; | |
2232 | return r; | |
2233 | } | |
2234 | return 0; | |
2235 | } | |
2236 | ||
2237 | void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) | |
2238 | { | |
2239 | u32 rb_bufsz; | |
2240 | ||
2241 | /* Align ring size */ | |
2242 | rb_bufsz = drm_order(ring_size / 8); | |
2243 | ring_size = (1 << (rb_bufsz + 1)) * 4; | |
2244 | ring->ring_size = ring_size; | |
2245 | ring->align_mask = 16 - 1; | |
2246 | } | |
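/* Worked example (illustrative): for the 1 MB GFX ring requested from
 * r600_init() below, ring_size / 8 = 131072, drm_order() of that is 17, so
 * the aligned size is (1 << 18) * 4 = 1 MB again and align_mask is 15,
 * i.e. ring commits are padded to 16-dword boundaries.
 */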
2247 | ||
2248 | void r600_cp_fini(struct radeon_device *rdev) | |
2249 | { | |
2250 | r600_cp_stop(rdev); | |
2251 | radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); | |
2252 | } | |
2253 | ||
2254 | ||
2255 | /* | |
2256 | * GPU scratch register helper functions. | |
2257 | */ | |
2258 | void r600_scratch_init(struct radeon_device *rdev) | |
2259 | { | |
2260 | int i; | |
2261 | ||
2262 | rdev->scratch.num_reg = 7; | |
2263 | rdev->scratch.reg_base = SCRATCH_REG0; | |
2264 | for (i = 0; i < rdev->scratch.num_reg; i++) { | |
2265 | rdev->scratch.free[i] = true; | |
2266 | rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); | |
2267 | } | |
2268 | } | |
2269 | ||
2270 | int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |
2271 | { | |
2272 | uint32_t scratch; | |
2273 | uint32_t tmp = 0; | |
2274 | unsigned i, ridx = radeon_ring_index(rdev, ring); | |
2275 | int r; | |
2276 | ||
2277 | r = radeon_scratch_get(rdev, &scratch); | |
2278 | if (r) { | |
2279 | DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); | |
2280 | return r; | |
2281 | } | |
2282 | WREG32(scratch, 0xCAFEDEAD); | |
2283 | r = radeon_ring_lock(rdev, ring, 3); | |
2284 | if (r) { | |
2285 | DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r); | |
2286 | radeon_scratch_free(rdev, scratch); | |
2287 | return r; | |
2288 | } | |
2289 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | |
2290 | radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | |
2291 | radeon_ring_write(ring, 0xDEADBEEF); | |
2292 | radeon_ring_unlock_commit(rdev, ring); | |
2293 | for (i = 0; i < rdev->usec_timeout; i++) { | |
2294 | tmp = RREG32(scratch); | |
2295 | if (tmp == 0xDEADBEEF) | |
2296 | break; | |
2297 | DRM_UDELAY(1); | |
2298 | } | |
2299 | if (i < rdev->usec_timeout) { | |
2300 | DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i); | |
2301 | } else { | |
2302 | DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n", | |
2303 | ridx, scratch, tmp); | |
2304 | r = -EINVAL; | |
2305 | } | |
2306 | radeon_scratch_free(rdev, scratch); | |
2307 | return r; | |
2308 | } | |
2309 | ||
2310 | void r600_fence_ring_emit(struct radeon_device *rdev, | |
2311 | struct radeon_fence *fence) | |
2312 | { | |
2313 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | |
2314 | ||
2315 | if (rdev->wb.use_event) { | |
2316 | u64 addr = rdev->fence_drv[fence->ring].gpu_addr; | |
2317 | /* flush read cache over gart */ | |
2318 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); | |
2319 | radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | | |
2320 | PACKET3_VC_ACTION_ENA | | |
2321 | PACKET3_SH_ACTION_ENA); | |
2322 | radeon_ring_write(ring, 0xFFFFFFFF); | |
2323 | radeon_ring_write(ring, 0); | |
2324 | radeon_ring_write(ring, 10); /* poll interval */ | |
2325 | /* EVENT_WRITE_EOP - flush caches, send int */ | |
2326 | radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); | |
2327 | radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); | |
2328 | radeon_ring_write(ring, addr & 0xffffffff); | |
2329 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); | |
2330 | radeon_ring_write(ring, fence->seq); | |
2331 | radeon_ring_write(ring, 0); | |
2332 | } else { | |
2333 | /* flush read cache over gart */ | |
2334 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); | |
2335 | radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | | |
2336 | PACKET3_VC_ACTION_ENA | | |
2337 | PACKET3_SH_ACTION_ENA); | |
2338 | radeon_ring_write(ring, 0xFFFFFFFF); | |
2339 | radeon_ring_write(ring, 0); | |
2340 | radeon_ring_write(ring, 10); /* poll interval */ | |
2341 | radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); | |
2342 | radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); | |
2343 | /* wait for 3D idle clean */ | |
2344 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | |
2345 | radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | |
2346 | radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); | |
2347 | /* Emit fence sequence & fire IRQ */ | |
2348 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | |
2349 | radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | |
2350 | radeon_ring_write(ring, fence->seq); | |
2351 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | |
2352 | radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0)); | |
2353 | radeon_ring_write(ring, RB_INT_STAT); | |
2354 | } | |
2355 | } | |
2356 | ||
2357 | void r600_semaphore_ring_emit(struct radeon_device *rdev, | |
2358 | struct radeon_ring *ring, | |
2359 | struct radeon_semaphore *semaphore, | |
2360 | bool emit_wait) | |
2361 | { | |
2362 | uint64_t addr = semaphore->gpu_addr; | |
2363 | unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; | |
2364 | ||
2365 | radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); | |
2366 | radeon_ring_write(ring, addr & 0xffffffff); | |
2367 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); | |
2368 | } | |
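/* Usage sketch (illustrative only): a semaphore is normally signalled from
 * one ring and waited on from another, e.g.
 *
 *	r600_semaphore_ring_emit(rdev, ring_a, sem, false);	(signal)
 *	r600_semaphore_ring_emit(rdev, ring_b, sem, true);	(wait)
 *
 * so that work queued on ring_b only runs once ring_a has passed the signal
 * packet. ring_a, ring_b and sem are placeholder names.
 */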
2369 | ||
2370 | int r600_copy_blit(struct radeon_device *rdev, | |
2371 | uint64_t src_offset, | |
2372 | uint64_t dst_offset, | |
2373 | unsigned num_gpu_pages, | |
2374 | struct radeon_fence *fence) | |
2375 | { | |
2376 | int r; | |
2377 | ||
2378 | mutex_lock(&rdev->r600_blit.mutex); | |
2379 | rdev->r600_blit.vb_ib = NULL; | |
2380 | r = r600_blit_prepare_copy(rdev, num_gpu_pages); | |
2381 | if (r) { | |
2382 | if (rdev->r600_blit.vb_ib) | |
2383 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | |
2384 | mutex_unlock(&rdev->r600_blit.mutex); | |
2385 | return r; | |
2386 | } | |
2387 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages); | |
2388 | r600_blit_done_copy(rdev, fence); | |
2389 | mutex_unlock(&rdev->r600_blit.mutex); | |
2390 | return 0; | |
2391 | } | |
2392 | ||
2393 | void r600_blit_suspend(struct radeon_device *rdev) | |
2394 | { | |
2395 | int r; | |
2396 | ||
2397 | /* unpin shaders bo */ | |
2398 | if (rdev->r600_blit.shader_obj) { | |
2399 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | |
2400 | if (!r) { | |
2401 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | |
2402 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | |
2403 | } | |
2404 | } | |
2405 | } | |
2406 | ||
2407 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | |
2408 | uint32_t tiling_flags, uint32_t pitch, | |
2409 | uint32_t offset, uint32_t obj_size) | |
2410 | { | |
2411 | /* FIXME: implement */ | |
2412 | return 0; | |
2413 | } | |
2414 | ||
2415 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg) | |
2416 | { | |
2417 | /* FIXME: implement */ | |
2418 | } | |
2419 | ||
2420 | int r600_startup(struct radeon_device *rdev) | |
2421 | { | |
2422 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | |
2423 | int r; | |
2424 | ||
2425 | /* enable pcie gen2 link */ | |
2426 | r600_pcie_gen2_enable(rdev); | |
2427 | ||
2428 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | |
2429 | r = r600_init_microcode(rdev); | |
2430 | if (r) { | |
2431 | DRM_ERROR("Failed to load firmware!\n"); | |
2432 | return r; | |
2433 | } | |
2434 | } | |
2435 | ||
2436 | r = r600_vram_scratch_init(rdev); | |
2437 | if (r) | |
2438 | return r; | |
2439 | ||
2440 | r600_mc_program(rdev); | |
2441 | if (rdev->flags & RADEON_IS_AGP) { | |
2442 | r600_agp_enable(rdev); | |
2443 | } else { | |
2444 | r = r600_pcie_gart_enable(rdev); | |
2445 | if (r) | |
2446 | return r; | |
2447 | } | |
2448 | r600_gpu_init(rdev); | |
2449 | r = r600_blit_init(rdev); | |
2450 | if (r) { | |
2451 | r600_blit_fini(rdev); | |
2452 | rdev->asic->copy.copy = NULL; | |
2453 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | |
2454 | } | |
2455 | ||
2456 | /* allocate wb buffer */ | |
2457 | r = radeon_wb_init(rdev); | |
2458 | if (r) | |
2459 | return r; | |
2460 | ||
2461 | r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); | |
2462 | if (r) { | |
2463 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); | |
2464 | return r; | |
2465 | } | |
2466 | ||
2467 | /* Enable IRQ */ | |
2468 | r = r600_irq_init(rdev); | |
2469 | if (r) { | |
2470 | DRM_ERROR("radeon: IH init failed (%d).\n", r); | |
2471 | radeon_irq_kms_fini(rdev); | |
2472 | return r; | |
2473 | } | |
2474 | r600_irq_set(rdev); | |
2475 | ||
2476 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | |
2477 | R600_CP_RB_RPTR, R600_CP_RB_WPTR, | |
2478 | 0, 0xfffff, RADEON_CP_PACKET2); | |
2479 | ||
2480 | if (r) | |
2481 | return r; | |
2482 | r = r600_cp_load_microcode(rdev); | |
2483 | if (r) | |
2484 | return r; | |
2485 | r = r600_cp_resume(rdev); | |
2486 | if (r) | |
2487 | return r; | |
2488 | ||
2489 | r = radeon_ib_pool_start(rdev); | |
2490 | if (r) | |
2491 | return r; | |
2492 | ||
2493 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); | |
2494 | if (r) { | |
2495 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | |
2496 | rdev->accel_working = false; | |
2497 | return r; | |
2498 | } | |
2499 | ||
2500 | return 0; | |
2501 | } | |
2502 | ||
2503 | void r600_vga_set_state(struct radeon_device *rdev, bool state) | |
2504 | { | |
2505 | uint32_t temp; | |
2506 | ||
2507 | temp = RREG32(CONFIG_CNTL); | |
2508 | if (state == false) { | |
2509 | temp &= ~(1<<0); | |
2510 | temp |= (1<<1); | |
2511 | } else { | |
2512 | temp &= ~(1<<1); | |
2513 | } | |
2514 | WREG32(CONFIG_CNTL, temp); | |
2515 | } | |
2516 | ||
2517 | int r600_resume(struct radeon_device *rdev) | |
2518 | { | |
2519 | int r; | |
2520 | ||
2521 | /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw, | |
2522 | * posting will perform the necessary tasks to bring the GPU back | |
2523 | * into good shape. | |
2524 | */ | |
2525 | /* post card */ | |
2526 | atom_asic_init(rdev->mode_info.atom_context); | |
2527 | ||
2528 | rdev->accel_working = true; | |
2529 | r = r600_startup(rdev); | |
2530 | if (r) { | |
2531 | DRM_ERROR("r600 startup failed on resume\n"); | |
2532 | return r; | |
2533 | } | |
2534 | ||
2535 | r = r600_audio_init(rdev); | |
2536 | if (r) { | |
2537 | DRM_ERROR("radeon: audio resume failed\n"); | |
2538 | return r; | |
2539 | } | |
2540 | ||
2541 | return r; | |
2542 | } | |
2543 | ||
2544 | int r600_suspend(struct radeon_device *rdev) | |
2545 | { | |
2546 | r600_audio_fini(rdev); | |
2547 | radeon_ib_pool_suspend(rdev); | |
2548 | r600_blit_suspend(rdev); | |
2549 | /* FIXME: we should wait for ring to be empty */ | |
2550 | r600_cp_stop(rdev); | |
2551 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | |
2552 | r600_irq_suspend(rdev); | |
2553 | radeon_wb_disable(rdev); | |
2554 | r600_pcie_gart_disable(rdev); | |
2555 | ||
2556 | return 0; | |
2557 | } | |
2558 | ||
2559 | /* The plan is to move initialization into this function and use | |
2560 | * helper functions so that radeon_device_init pretty much | |
2561 | * does nothing more than call asic-specific functions. This | |
2562 | * should also allow us to remove a bunch of callback functions | |
2563 | * like vram_info. | |
2564 | */ | |
2565 | int r600_init(struct radeon_device *rdev) | |
2566 | { | |
2567 | int r; | |
2568 | ||
2569 | if (r600_debugfs_mc_info_init(rdev)) { | |
2570 | DRM_ERROR("Failed to register debugfs file for mc !\n"); | |
2571 | } | |
2572 | /* This doesn't do much */ | |
2573 | r = radeon_gem_init(rdev); | |
2574 | if (r) | |
2575 | return r; | |
2576 | /* Read BIOS */ | |
2577 | if (!radeon_get_bios(rdev)) { | |
2578 | if (ASIC_IS_AVIVO(rdev)) | |
2579 | return -EINVAL; | |
2580 | } | |
2581 | /* Must be an ATOMBIOS */ | |
2582 | if (!rdev->is_atom_bios) { | |
2583 | dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); | |
2584 | return -EINVAL; | |
2585 | } | |
2586 | r = radeon_atombios_init(rdev); | |
2587 | if (r) | |
2588 | return r; | |
2589 | /* Post card if necessary */ | |
2590 | if (!radeon_card_posted(rdev)) { | |
2591 | if (!rdev->bios) { | |
2592 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | |
2593 | return -EINVAL; | |
2594 | } | |
2595 | DRM_INFO("GPU not posted. posting now...\n"); | |
2596 | atom_asic_init(rdev->mode_info.atom_context); | |
2597 | } | |
2598 | /* Initialize scratch registers */ | |
2599 | r600_scratch_init(rdev); | |
2600 | /* Initialize surface registers */ | |
2601 | radeon_surface_init(rdev); | |
2602 | /* Initialize clocks */ | |
2603 | radeon_get_clock_info(rdev->ddev); | |
2604 | /* Fence driver */ | |
2605 | r = radeon_fence_driver_init(rdev); | |
2606 | if (r) | |
2607 | return r; | |
2608 | if (rdev->flags & RADEON_IS_AGP) { | |
2609 | r = radeon_agp_init(rdev); | |
2610 | if (r) | |
2611 | radeon_agp_disable(rdev); | |
2612 | } | |
2613 | r = r600_mc_init(rdev); | |
2614 | if (r) | |
2615 | return r; | |
2616 | /* Memory manager */ | |
2617 | r = radeon_bo_init(rdev); | |
2618 | if (r) | |
2619 | return r; | |
2620 | ||
2621 | r = radeon_irq_kms_init(rdev); | |
2622 | if (r) | |
2623 | return r; | |
2624 | ||
2625 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | |
2626 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | |
2627 | ||
2628 | rdev->ih.ring_obj = NULL; | |
2629 | r600_ih_ring_init(rdev, 64 * 1024); | |
2630 | ||
2631 | r = r600_pcie_gart_init(rdev); | |
2632 | if (r) | |
2633 | return r; | |
2634 | ||
2635 | r = radeon_ib_pool_init(rdev); | |
2636 | rdev->accel_working = true; | |
2637 | if (r) { | |
2638 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | |
2639 | rdev->accel_working = false; | |
2640 | } | |
2641 | ||
2642 | r = r600_startup(rdev); | |
2643 | if (r) { | |
2644 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | |
2645 | r600_cp_fini(rdev); | |
2646 | r600_irq_fini(rdev); | |
2647 | radeon_wb_fini(rdev); | |
2648 | r100_ib_fini(rdev); | |
2649 | radeon_irq_kms_fini(rdev); | |
2650 | r600_pcie_gart_fini(rdev); | |
2651 | rdev->accel_working = false; | |
2652 | } | |
2653 | ||
2654 | r = r600_audio_init(rdev); | |
2655 | if (r) | |
2656 | return r; /* TODO error handling */ | |
2657 | return 0; | |
2658 | } | |
2659 | ||
2660 | void r600_fini(struct radeon_device *rdev) | |
2661 | { | |
2662 | r600_audio_fini(rdev); | |
2663 | r600_blit_fini(rdev); | |
2664 | r600_cp_fini(rdev); | |
2665 | r600_irq_fini(rdev); | |
2666 | radeon_wb_fini(rdev); | |
2667 | r100_ib_fini(rdev); | |
2668 | radeon_irq_kms_fini(rdev); | |
2669 | r600_pcie_gart_fini(rdev); | |
2670 | r600_vram_scratch_fini(rdev); | |
2671 | radeon_agp_fini(rdev); | |
2672 | radeon_gem_fini(rdev); | |
2673 | radeon_semaphore_driver_fini(rdev); | |
2674 | radeon_fence_driver_fini(rdev); | |
2675 | radeon_bo_fini(rdev); | |
2676 | radeon_atombios_fini(rdev); | |
2677 | kfree(rdev->bios); | |
2678 | rdev->bios = NULL; | |
2679 | } | |
2680 | ||
2681 | ||
2682 | /* | |
2683 | * CS stuff | |
2684 | */ | |
2685 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |
2686 | { | |
2687 | struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; | |
2688 | ||
2689 | /* FIXME: implement */ | |
2690 | radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | |
2691 | radeon_ring_write(ring, | |
2692 | #ifdef __BIG_ENDIAN | |
2693 | (2 << 0) | | |
2694 | #endif | |
2695 | (ib->gpu_addr & 0xFFFFFFFC)); | |
2696 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); | |
2697 | radeon_ring_write(ring, ib->length_dw); | |
2698 | } | |
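/* Worked example (illustrative): on a little-endian host, an IB at
 * gpu_addr 0x123456700 with length_dw = 64 emits, after the
 * INDIRECT_BUFFER header, 0x23456700 (dword-aligned low address),
 * 0x00000001 (upper address bits) and 0x00000040 (length in dwords).
 */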
2699 | ||
2700 | int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |
2701 | { | |
2702 | struct radeon_ib *ib; | |
2703 | uint32_t scratch; | |
2704 | uint32_t tmp = 0; | |
2705 | unsigned i; | |
2706 | int r; | |
2707 | int ring_index = radeon_ring_index(rdev, ring); | |
2708 | ||
2709 | r = radeon_scratch_get(rdev, &scratch); | |
2710 | if (r) { | |
2711 | DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); | |
2712 | return r; | |
2713 | } | |
2714 | WREG32(scratch, 0xCAFEDEAD); | |
2715 | r = radeon_ib_get(rdev, ring_index, &ib, 256); | |
2716 | if (r) { | |
2717 | DRM_ERROR("radeon: failed to get ib (%d).\n", r); | |
2718 | return r; | |
2719 | } | |
2720 | ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); | |
2721 | ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | |
2722 | ib->ptr[2] = 0xDEADBEEF; | |
2723 | ib->length_dw = 3; | |
2724 | r = radeon_ib_schedule(rdev, ib); | |
2725 | if (r) { | |
2726 | radeon_scratch_free(rdev, scratch); | |
2727 | radeon_ib_free(rdev, &ib); | |
2728 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | |
2729 | return r; | |
2730 | } | |
2731 | r = radeon_fence_wait(ib->fence, false); | |
2732 | if (r) { | |
2733 | DRM_ERROR("radeon: fence wait failed (%d).\n", r); | |
2734 | return r; | |
2735 | } | |
2736 | for (i = 0; i < rdev->usec_timeout; i++) { | |
2737 | tmp = RREG32(scratch); | |
2738 | if (tmp == 0xDEADBEEF) | |
2739 | break; | |
2740 | DRM_UDELAY(1); | |
2741 | } | |
2742 | if (i < rdev->usec_timeout) { | |
2743 | DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i); | |
2744 | } else { | |
2745 | DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", | |
2746 | scratch, tmp); | |
2747 | r = -EINVAL; | |
2748 | } | |
2749 | radeon_scratch_free(rdev, scratch); | |
2750 | radeon_ib_free(rdev, &ib); | |
2751 | return r; | |
2752 | } | |
2753 | ||
2754 | /* | |
2755 | * Interrupts | |
2756 | * | |
2757 | * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much | |
2758 | * the same as the CP ring buffer, but in reverse: rather than the CPU | |
2759 | * writing to the ring and the GPU consuming it, the GPU writes to the | |
2760 | * ring and the host consumes it. As the host irq handler processes | |
2761 | * interrupts, it increments the rptr. When the rptr catches up with | |
2762 | * the wptr, all the current interrupts have been processed. | |
2763 | */ | |
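/* Consumption sketch (illustrative only, heavily simplified compared to the
 * real r600_irq_process()): the host samples the write pointer, walks the
 * ring until the read pointer catches up, then publishes the new rptr back
 * to the hardware. The function name is hypothetical and wptr overflow
 * handling is omitted.
 */
static void r600_ih_drain_sketch(struct radeon_device *rdev)
{
	u32 wptr = RREG32(IH_RB_WPTR) & rdev->ih.ptr_mask;
	u32 rptr = rdev->ih.rptr;

	while (rptr != wptr) {
		/* decode and dispatch the 16-byte interrupt vector at
		 * rdev->ih.ring + rptr here
		 */
		rptr = (rptr + 16) & rdev->ih.ptr_mask;
	}
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
}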
2764 | ||
2765 | void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) | |
2766 | { | |
2767 | u32 rb_bufsz; | |
2768 | ||
2769 | /* Align ring size */ | |
2770 | rb_bufsz = drm_order(ring_size / 4); | |
2771 | ring_size = (1 << rb_bufsz) * 4; | |
2772 | rdev->ih.ring_size = ring_size; | |
2773 | rdev->ih.ptr_mask = rdev->ih.ring_size - 1; | |
2774 | rdev->ih.rptr = 0; | |
2775 | } | |
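/* Worked example (illustrative): r600_init() above asks for a 64 KB IH ring,
 * so drm_order(65536 / 4) = 14, ring_size = (1 << 14) * 4 = 64 KB and
 * ptr_mask = 0xffff, letting the byte offsets into the ring wrap with a
 * simple AND.
 */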
2776 | ||
2777 | static int r600_ih_ring_alloc(struct radeon_device *rdev) | |
2778 | { | |
2779 | int r; | |
2780 | ||
2781 | /* Allocate ring buffer */ | |
2782 | if (rdev->ih.ring_obj == NULL) { | |
2783 | r = radeon_bo_create(rdev, rdev->ih.ring_size, | |
2784 | PAGE_SIZE, true, | |
2785 | RADEON_GEM_DOMAIN_GTT, | |
2786 | &rdev->ih.ring_obj); | |
2787 | if (r) { | |
2788 | DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); | |
2789 | return r; | |
2790 | } | |
2791 | r = radeon_bo_reserve(rdev->ih.ring_obj, false); | |
2792 | if (unlikely(r != 0)) | |
2793 | return r; | |
2794 | r = radeon_bo_pin(rdev->ih.ring_obj, | |
2795 | RADEON_GEM_DOMAIN_GTT, | |
2796 | &rdev->ih.gpu_addr); | |
2797 | if (r) { | |
2798 | radeon_bo_unreserve(rdev->ih.ring_obj); | |
2799 | DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); | |
2800 | return r; | |
2801 | } | |
2802 | r = radeon_bo_kmap(rdev->ih.ring_obj, | |
2803 | (void **)&rdev->ih.ring); | |
2804 | radeon_bo_unreserve(rdev->ih.ring_obj); | |
2805 | if (r) { | |
2806 | DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); | |
2807 | return r; | |
2808 | } | |
2809 | } | |
2810 | return 0; | |
2811 | } | |
2812 | ||
2813 | static void r600_ih_ring_fini(struct radeon_device *rdev) | |
2814 | { | |
2815 | int r; | |
2816 | if (rdev->ih.ring_obj) { | |
2817 | r = radeon_bo_reserve(rdev->ih.ring_obj, false); | |
2818 | if (likely(r == 0)) { | |
2819 | radeon_bo_kunmap(rdev->ih.ring_obj); | |
2820 | radeon_bo_unpin(rdev->ih.ring_obj); | |
2821 | radeon_bo_unreserve(rdev->ih.ring_obj); | |
2822 | } | |
2823 | radeon_bo_unref(&rdev->ih.ring_obj); | |
2824 | rdev->ih.ring = NULL; | |
2825 | rdev->ih.ring_obj = NULL; | |
2826 | } | |
2827 | } | |
2828 | ||
2829 | void r600_rlc_stop(struct radeon_device *rdev) | |
2830 | { | |
2831 | ||
2832 | if ((rdev->family >= CHIP_RV770) && | |
2833 | (rdev->family <= CHIP_RV740)) { | |
2834 | /* r7xx asics need to soft reset RLC before halting */ | |
2835 | WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); | |
2836 | RREG32(SRBM_SOFT_RESET); | |
2837 | udelay(15000); | |
2838 | WREG32(SRBM_SOFT_RESET, 0); | |
2839 | RREG32(SRBM_SOFT_RESET); | |
2840 | } | |
2841 | ||
2842 | WREG32(RLC_CNTL, 0); | |
2843 | } | |
2844 | ||
2845 | static void r600_rlc_start(struct radeon_device *rdev) | |
2846 | { | |
2847 | WREG32(RLC_CNTL, RLC_ENABLE); | |
2848 | } | |
2849 | ||
2850 | static int r600_rlc_init(struct radeon_device *rdev) | |
2851 | { | |
2852 | u32 i; | |
2853 | const __be32 *fw_data; | |
2854 | ||
2855 | if (!rdev->rlc_fw) | |
2856 | return -EINVAL; | |
2857 | ||
2858 | r600_rlc_stop(rdev); | |
2859 | ||
2860 | WREG32(RLC_HB_BASE, 0); | |
2861 | WREG32(RLC_HB_CNTL, 0); | |
2862 | WREG32(RLC_HB_RPTR, 0); | |
2863 | WREG32(RLC_HB_WPTR, 0); | |
2864 | if (rdev->family <= CHIP_CAICOS) { | |
2865 | WREG32(RLC_HB_WPTR_LSB_ADDR, 0); | |
2866 | WREG32(RLC_HB_WPTR_MSB_ADDR, 0); | |
2867 | } | |
2868 | WREG32(RLC_MC_CNTL, 0); | |
2869 | WREG32(RLC_UCODE_CNTL, 0); | |
2870 | ||
2871 | fw_data = (const __be32 *)rdev->rlc_fw->data; | |
2872 | if (rdev->family >= CHIP_CAYMAN) { | |
2873 | for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) { | |
2874 | WREG32(RLC_UCODE_ADDR, i); | |
2875 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | |
2876 | } | |
2877 | } else if (rdev->family >= CHIP_CEDAR) { | |
2878 | for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { | |
2879 | WREG32(RLC_UCODE_ADDR, i); | |
2880 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | |
2881 | } | |
2882 | } else if (rdev->family >= CHIP_RV770) { | |
2883 | for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { | |
2884 | WREG32(RLC_UCODE_ADDR, i); | |
2885 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | |
2886 | } | |
2887 | } else { | |
2888 | for (i = 0; i < RLC_UCODE_SIZE; i++) { | |
2889 | WREG32(RLC_UCODE_ADDR, i); | |
2890 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | |
2891 | } | |
2892 | } | |
2893 | WREG32(RLC_UCODE_ADDR, 0); | |
2894 | ||
2895 | r600_rlc_start(rdev); | |
2896 | ||
2897 | return 0; | |
2898 | } | |
2899 | ||
2900 | static void r600_enable_interrupts(struct radeon_device *rdev) | |
2901 | { | |
2902 | u32 ih_cntl = RREG32(IH_CNTL); | |
2903 | u32 ih_rb_cntl = RREG32(IH_RB_CNTL); | |
2904 | ||
2905 | ih_cntl |= ENABLE_INTR; | |
2906 | ih_rb_cntl |= IH_RB_ENABLE; | |
2907 | WREG32(IH_CNTL, ih_cntl); | |
2908 | WREG32(IH_RB_CNTL, ih_rb_cntl); | |
2909 | rdev->ih.enabled = true; | |
2910 | } | |
2911 | ||
2912 | void r600_disable_interrupts(struct radeon_device *rdev) | |
2913 | { | |
2914 | u32 ih_rb_cntl = RREG32(IH_RB_CNTL); | |
2915 | u32 ih_cntl = RREG32(IH_CNTL); | |
2916 | ||
2917 | ih_rb_cntl &= ~IH_RB_ENABLE; | |
2918 | ih_cntl &= ~ENABLE_INTR; | |
2919 | WREG32(IH_RB_CNTL, ih_rb_cntl); | |
2920 | WREG32(IH_CNTL, ih_cntl); | |
2921 | /* set rptr, wptr to 0 */ | |
2922 | WREG32(IH_RB_RPTR, 0); | |
2923 | WREG32(IH_RB_WPTR, 0); | |
2924 | rdev->ih.enabled = false; | |
2925 | rdev->ih.wptr = 0; | |
2926 | rdev->ih.rptr = 0; | |
2927 | } | |
2928 | ||
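/**
 * r600_disable_interrupt_state - mask off all interrupt sources
 * @rdev: radeon device structure
 *
 * Clears the CP, GRBM, display mode and GRPH (pageflip) interrupt
 * enables and masks the DAC auto-detect and HPD sources, keeping only
 * the HPD polarity bits.  DCE3/DCE3.2 and pre-DCE3 register layouts
 * are handled separately.
 */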
2929 | static void r600_disable_interrupt_state(struct radeon_device *rdev) | |
2930 | { | |
2931 | u32 tmp; | |
2932 | ||
2933 | WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | |
2934 | WREG32(GRBM_INT_CNTL, 0); | |
2935 | WREG32(DxMODE_INT_MASK, 0); | |
2936 | WREG32(D1GRPH_INTERRUPT_CONTROL, 0); | |
2937 | WREG32(D2GRPH_INTERRUPT_CONTROL, 0); | |
2938 | if (ASIC_IS_DCE3(rdev)) { | |
2939 | WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); | |
2940 | WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); | |
2941 | tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2942 | WREG32(DC_HPD1_INT_CONTROL, tmp); | |
2943 | tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2944 | WREG32(DC_HPD2_INT_CONTROL, tmp); | |
2945 | tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2946 | WREG32(DC_HPD3_INT_CONTROL, tmp); | |
2947 | tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2948 | WREG32(DC_HPD4_INT_CONTROL, tmp); | |
2949 | if (ASIC_IS_DCE32(rdev)) { | |
2950 | tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2951 | WREG32(DC_HPD5_INT_CONTROL, tmp); | |
2952 | tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2953 | WREG32(DC_HPD6_INT_CONTROL, tmp); | |
2954 | } | |
2955 | } else { | |
2956 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | |
2957 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); | |
2958 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
2959 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | |
2960 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
2961 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | |
2962 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | |
2963 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); | |
2964 | } | |
2965 | } | |
2966 | ||
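/**
 * r600_irq_init - one time IH setup
 * @rdev: radeon device structure
 *
 * Allocates the IH ring, loads the RLC ucode, programs the IH ring
 * base and size, the writeback wptr address and IH_CNTL (with
 * RPTR_REARM when MSIs are in use), forces all interrupt sources off
 * and finally enables the IH.
 * Returns 0 on success or a negative error code if ring allocation or
 * RLC init fails.
 */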
2967 | int r600_irq_init(struct radeon_device *rdev) | |
2968 | { | |
2969 | int ret = 0; | |
2970 | int rb_bufsz; | |
2971 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | |
2972 | ||
2973 | /* allocate ring */ | |
2974 | ret = r600_ih_ring_alloc(rdev); | |
2975 | if (ret) | |
2976 | return ret; | |
2977 | ||
2978 | /* disable irqs */ | |
2979 | r600_disable_interrupts(rdev); | |
2980 | ||
2981 | /* init rlc */ | |
2982 | ret = r600_rlc_init(rdev); | |
2983 | if (ret) { | |
2984 | r600_ih_ring_fini(rdev); | |
2985 | return ret; | |
2986 | } | |
2987 | ||
2988 | /* setup interrupt control */ | |
2989 | /* set dummy read address to ring address */ | |
2990 | WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); | |
2991 | interrupt_cntl = RREG32(INTERRUPT_CNTL); | |
2992 | /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi | |
2993 | * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN | |
2994 | */ | |
2995 | interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; | |
2996 | /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ | |
2997 | interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; | |
2998 | WREG32(INTERRUPT_CNTL, interrupt_cntl); | |
2999 | ||
3000 | WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); | |
3001 | rb_bufsz = drm_order(rdev->ih.ring_size / 4); | |
3002 | ||
3003 | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | | |
3004 | IH_WPTR_OVERFLOW_CLEAR | | |
3005 | (rb_bufsz << 1)); | |
3006 | ||
3007 | if (rdev->wb.enabled) | |
3008 | ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; | |
3009 | ||
3010 | /* set the writeback address whether it's enabled or not */ | |
3011 | WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); | |
3012 | WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); | |
3013 | ||
3014 | WREG32(IH_RB_CNTL, ih_rb_cntl); | |
3015 | ||
3016 | /* set rptr, wptr to 0 */ | |
3017 | WREG32(IH_RB_RPTR, 0); | |
3018 | WREG32(IH_RB_WPTR, 0); | |
3019 | ||
3020 | /* Default settings for IH_CNTL (disabled at first) */ | |
3021 | ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10); | |
3022 | /* RPTR_REARM only works if msi's are enabled */ | |
3023 | if (rdev->msi_enabled) | |
3024 | ih_cntl |= RPTR_REARM; | |
3025 | WREG32(IH_CNTL, ih_cntl); | |
3026 | ||
3027 | /* force the active interrupt state to all disabled */ | |
3028 | if (rdev->family >= CHIP_CEDAR) | |
3029 | evergreen_disable_interrupt_state(rdev); | |
3030 | else | |
3031 | r600_disable_interrupt_state(rdev); | |
3032 | ||
3033 | /* enable irqs */ | |
3034 | r600_enable_interrupts(rdev); | |
3035 | ||
3036 | return ret; | |
3037 | } | |
3038 | ||
3039 | void r600_irq_suspend(struct radeon_device *rdev) | |
3040 | { | |
3041 | r600_irq_disable(rdev); | |
3042 | r600_rlc_stop(rdev); | |
3043 | } | |
3044 | ||
3045 | void r600_irq_fini(struct radeon_device *rdev) | |
3046 | { | |
3047 | r600_irq_suspend(rdev); | |
3048 | r600_ih_ring_fini(rdev); | |
3049 | } | |
3050 | ||
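/**
 * r600_irq_set - program the interrupt enable registers
 * @rdev: radeon device structure
 *
 * Builds the CP, vblank/pageflip, HPD, HDMI and GUI idle enable bits
 * from the current rdev->irq state and writes them to the hardware.
 * If the IH is not enabled, all sources are forced off instead.
 * Returns 0 on success, -EINVAL if no irq handler is installed.
 */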
3051 | int r600_irq_set(struct radeon_device *rdev) | |
3052 | { | |
3053 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; | |
3054 | u32 mode_int = 0; | |
3055 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; | |
3056 | u32 grbm_int_cntl = 0; | |
3057 | u32 hdmi1, hdmi2; | |
3058 | u32 d1grph = 0, d2grph = 0; | |
3059 | ||
3060 | if (!rdev->irq.installed) { | |
3061 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); | |
3062 | return -EINVAL; | |
3063 | } | |
3064 | /* don't enable anything if the ih is disabled */ | |
3065 | if (!rdev->ih.enabled) { | |
3066 | r600_disable_interrupts(rdev); | |
3067 | /* force the active interrupt state to all disabled */ | |
3068 | r600_disable_interrupt_state(rdev); | |
3069 | return 0; | |
3070 | } | |
3071 | ||
3072 | hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; | |
3073 | if (ASIC_IS_DCE3(rdev)) { | |
3074 | hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; | |
3075 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3076 | hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3077 | hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3078 | hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3079 | if (ASIC_IS_DCE32(rdev)) { | |
3080 | hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3081 | hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3082 | } | |
3083 | } else { | |
3084 | hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; | |
3085 | hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3086 | hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3087 | hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3088 | } | |
3089 | ||
3090 | if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { | |
3091 | DRM_DEBUG("r600_irq_set: sw int\n"); | |
3092 | cp_int_cntl |= RB_INT_ENABLE; | |
3093 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; | |
3094 | } | |
3095 | if (rdev->irq.crtc_vblank_int[0] || | |
3096 | rdev->irq.pflip[0]) { | |
3097 | DRM_DEBUG("r600_irq_set: vblank 0\n"); | |
3098 | mode_int |= D1MODE_VBLANK_INT_MASK; | |
3099 | } | |
3100 | if (rdev->irq.crtc_vblank_int[1] || | |
3101 | rdev->irq.pflip[1]) { | |
3102 | DRM_DEBUG("r600_irq_set: vblank 1\n"); | |
3103 | mode_int |= D2MODE_VBLANK_INT_MASK; | |
3104 | } | |
3105 | if (rdev->irq.hpd[0]) { | |
3106 | DRM_DEBUG("r600_irq_set: hpd 1\n"); | |
3107 | hpd1 |= DC_HPDx_INT_EN; | |
3108 | } | |
3109 | if (rdev->irq.hpd[1]) { | |
3110 | DRM_DEBUG("r600_irq_set: hpd 2\n"); | |
3111 | hpd2 |= DC_HPDx_INT_EN; | |
3112 | } | |
3113 | if (rdev->irq.hpd[2]) { | |
3114 | DRM_DEBUG("r600_irq_set: hpd 3\n"); | |
3115 | hpd3 |= DC_HPDx_INT_EN; | |
3116 | } | |
3117 | if (rdev->irq.hpd[3]) { | |
3118 | DRM_DEBUG("r600_irq_set: hpd 4\n"); | |
3119 | hpd4 |= DC_HPDx_INT_EN; | |
3120 | } | |
3121 | if (rdev->irq.hpd[4]) { | |
3122 | DRM_DEBUG("r600_irq_set: hpd 5\n"); | |
3123 | hpd5 |= DC_HPDx_INT_EN; | |
3124 | } | |
3125 | if (rdev->irq.hpd[5]) { | |
3126 | DRM_DEBUG("r600_irq_set: hpd 6\n"); | |
3127 | hpd6 |= DC_HPDx_INT_EN; | |
3128 | } | |
3129 | if (rdev->irq.hdmi[0]) { | |
3130 | DRM_DEBUG("r600_irq_set: hdmi 1\n"); | |
3131 | hdmi1 |= R600_HDMI_INT_EN; | |
3132 | } | |
3133 | if (rdev->irq.hdmi[1]) { | |
3134 | DRM_DEBUG("r600_irq_set: hdmi 2\n"); | |
3135 | hdmi2 |= R600_HDMI_INT_EN; | |
3136 | } | |
3137 | if (rdev->irq.gui_idle) { | |
3138 | DRM_DEBUG("gui idle\n"); | |
3139 | grbm_int_cntl |= GUI_IDLE_INT_ENABLE; | |
3140 | } | |
3141 | ||
3142 | WREG32(CP_INT_CNTL, cp_int_cntl); | |
3143 | WREG32(DxMODE_INT_MASK, mode_int); | |
3144 | WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); | |
3145 | WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); | |
3146 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); | |
3147 | WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); | |
3148 | if (ASIC_IS_DCE3(rdev)) { | |
3149 | WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2); | |
3150 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | |
3151 | WREG32(DC_HPD2_INT_CONTROL, hpd2); | |
3152 | WREG32(DC_HPD3_INT_CONTROL, hpd3); | |
3153 | WREG32(DC_HPD4_INT_CONTROL, hpd4); | |
3154 | if (ASIC_IS_DCE32(rdev)) { | |
3155 | WREG32(DC_HPD5_INT_CONTROL, hpd5); | |
3156 | WREG32(DC_HPD6_INT_CONTROL, hpd6); | |
3157 | } | |
3158 | } else { | |
3159 | WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2); | |
3160 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); | |
3161 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); | |
3162 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); | |
3163 | } | |
3164 | ||
3165 | return 0; | |
3166 | } | |
3167 | ||
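/**
 * r600_irq_ack - latch and acknowledge display interrupts
 * @rdev: radeon device structure
 *
 * Snapshots the display interrupt status registers into
 * rdev->irq.stat_regs.r600 and acknowledges any pending pageflip,
 * vblank/vline, HPD and HDMI interrupts.
 */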
3168 | static void r600_irq_ack(struct radeon_device *rdev) | |
3169 | { | |
3170 | u32 tmp; | |
3171 | ||
3172 | if (ASIC_IS_DCE3(rdev)) { | |
3173 | rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); | |
3174 | rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); | |
3175 | rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); | |
3176 | } else { | |
3177 | rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); | |
3178 | rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); | |
3179 | rdev->irq.stat_regs.r600.disp_int_cont2 = 0; | |
3180 | } | |
3181 | rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); | |
3182 | rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); | |
3183 | ||
3184 | if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED) | |
3185 | WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); | |
3186 | if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED) | |
3187 | WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); | |
3188 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) | |
3189 | WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | |
3190 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) | |
3191 | WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | |
3192 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) | |
3193 | WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | |
3194 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) | |
3195 | WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | |
3196 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { | |
3197 | if (ASIC_IS_DCE3(rdev)) { | |
3198 | tmp = RREG32(DC_HPD1_INT_CONTROL); | |
3199 | tmp |= DC_HPDx_INT_ACK; | |
3200 | WREG32(DC_HPD1_INT_CONTROL, tmp); | |
3201 | } else { | |
3202 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); | |
3203 | tmp |= DC_HPDx_INT_ACK; | |
3204 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | |
3205 | } | |
3206 | } | |
3207 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { | |
3208 | if (ASIC_IS_DCE3(rdev)) { | |
3209 | tmp = RREG32(DC_HPD2_INT_CONTROL); | |
3210 | tmp |= DC_HPDx_INT_ACK; | |
3211 | WREG32(DC_HPD2_INT_CONTROL, tmp); | |
3212 | } else { | |
3213 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); | |
3214 | tmp |= DC_HPDx_INT_ACK; | |
3215 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | |
3216 | } | |
3217 | } | |
3218 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { | |
3219 | if (ASIC_IS_DCE3(rdev)) { | |
3220 | tmp = RREG32(DC_HPD3_INT_CONTROL); | |
3221 | tmp |= DC_HPDx_INT_ACK; | |
3222 | WREG32(DC_HPD3_INT_CONTROL, tmp); | |
3223 | } else { | |
3224 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); | |
3225 | tmp |= DC_HPDx_INT_ACK; | |
3226 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); | |
3227 | } | |
3228 | } | |
3229 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { | |
3230 | tmp = RREG32(DC_HPD4_INT_CONTROL); | |
3231 | tmp |= DC_HPDx_INT_ACK; | |
3232 | WREG32(DC_HPD4_INT_CONTROL, tmp); | |
3233 | } | |
3234 | if (ASIC_IS_DCE32(rdev)) { | |
3235 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { | |
3236 | tmp = RREG32(DC_HPD5_INT_CONTROL); | |
3237 | tmp |= DC_HPDx_INT_ACK; | |
3238 | WREG32(DC_HPD5_INT_CONTROL, tmp); | |
3239 | } | |
3240 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { | |
3241 | tmp = RREG32(DC_HPD6_INT_CONTROL); | |
3242 | tmp |= DC_HPDx_INT_ACK; | |
3243 | WREG32(DC_HPD6_INT_CONTROL, tmp); | |
3244 | } | |
3245 | } | |
3246 | if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { | |
3247 | WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); | |
3248 | } | |
3249 | if (ASIC_IS_DCE3(rdev)) { | |
3250 | if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { | |
3251 | WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); | |
3252 | } | |
3253 | } else { | |
3254 | if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { | |
3255 | WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); | |
3256 | } | |
3257 | } | |
3258 | } | |
3259 | ||
3260 | void r600_irq_disable(struct radeon_device *rdev) | |
3261 | { | |
3262 | r600_disable_interrupts(rdev); | |
3263 | /* Wait and acknowledge irq */ | |
3264 | mdelay(1); | |
3265 | r600_irq_ack(rdev); | |
3266 | r600_disable_interrupt_state(rdev); | |
3267 | } | |
3268 | ||
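/**
 * r600_get_ih_wptr - fetch the current IH ring write pointer
 * @rdev: radeon device structure
 *
 * Reads the wptr from the writeback page when writeback is enabled,
 * otherwise from IH_RB_WPTR.  On a ring overflow the rptr is moved to
 * the last vector that was not overwritten and the overflow bit is
 * cleared.  Returns the wptr masked to the ring size.
 */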
3269 | static u32 r600_get_ih_wptr(struct radeon_device *rdev) | |
3270 | { | |
3271 | u32 wptr, tmp; | |
3272 | ||
3273 | if (rdev->wb.enabled) | |
3274 | wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); | |
3275 | else | |
3276 | wptr = RREG32(IH_RB_WPTR); | |
3277 | ||
3278 | if (wptr & RB_OVERFLOW) { | |
3279 | /* When a ring buffer overflow happens, start parsing interrupts | |
3280 | * from the last vector that was not overwritten (wptr + 16). | |
3281 | * Hopefully this allows us to catch up. | |
3282 | */ | |
3283 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | |
3284 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); | |
3285 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | |
3286 | tmp = RREG32(IH_RB_CNTL); | |
3287 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | |
3288 | WREG32(IH_RB_CNTL, tmp); | |
3289 | } | |
3290 | return (wptr & rdev->ih.ptr_mask); | |
3291 | } | |
3292 | ||
3293 | /* r600 IV Ring | |
3294 | * Each IV ring entry is 128 bits: | |
3295 | * [7:0] - interrupt source id | |
3296 | * [31:8] - reserved | |
3297 | * [59:32] - interrupt source data | |
3298 | * [127:60] - reserved | |
3299 | * | |
3300 | * The basic interrupt vector entries | |
3301 | * are decoded as follows: | |
3302 | * src_id src_data description | |
3303 | * 1 0 D1 Vblank | |
3304 | * 1 1 D1 Vline | |
3305 | * 5 0 D2 Vblank | |
3306 | * 5 1 D2 Vline | |
3307 | * 19 0 FP Hot plug detection A | |
3308 | * 19 1 FP Hot plug detection B | |
3309 | * 19 2 DAC A auto-detection | |
3310 | * 19 3 DAC B auto-detection | |
3311 | * 21 4 HDMI block A | |
3312 | * 21 5 HDMI block B | |
3313 | * 176 - CP_INT RB | |
3314 | * 177 - CP_INT IB1 | |
3315 | * 178 - CP_INT IB2 | |
3316 | * 181 - EOP Interrupt | |
3317 | * 233 - GUI Idle | |
3318 | * | |
3319 | * Note, these are based on r600 and may need to be | |
3320 | * adjusted or added to on newer asics | |
3321 | */ | |
3322 | ||
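/**
 * r600_irq_process - walk the IH ring and dispatch interrupts
 * @rdev: radeon device structure
 *
 * Drains the IH ring between rptr and wptr, decoding each 16 byte
 * vector as described above and dispatching vblank/vline, pageflip,
 * hotplug, HDMI audio and CP/fence events.  The wptr is re-read before
 * returning and processing restarts if it moved in the meantime.
 * Returns IRQ_HANDLED when vectors were processed, IRQ_NONE otherwise.
 */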
3323 | int r600_irq_process(struct radeon_device *rdev) | |
3324 | { | |
3325 | u32 wptr; | |
3326 | u32 rptr; | |
3327 | u32 src_id, src_data; | |
3328 | u32 ring_index; | |
3329 | unsigned long flags; | |
3330 | bool queue_hotplug = false; | |
3331 | ||
3332 | if (!rdev->ih.enabled || rdev->shutdown) | |
3333 | return IRQ_NONE; | |
3334 | ||
3335 | /* No MSIs, need a dummy read to flush PCI DMAs */ | |
3336 | if (!rdev->msi_enabled) | |
3337 | RREG32(IH_RB_WPTR); | |
3338 | ||
3339 | wptr = r600_get_ih_wptr(rdev); | |
3340 | rptr = rdev->ih.rptr; | |
3341 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | |
3342 | ||
3343 | spin_lock_irqsave(&rdev->ih.lock, flags); | |
3344 | ||
3345 | if (rptr == wptr) { | |
3346 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | |
3347 | return IRQ_NONE; | |
3348 | } | |
3349 | ||
3350 | restart_ih: | |
3351 | /* Order reading of wptr vs. reading of IH ring data */ | |
3352 | rmb(); | |
3353 | ||
3354 | /* display interrupts */ | |
3355 | r600_irq_ack(rdev); | |
3356 | ||
3357 | rdev->ih.wptr = wptr; | |
3358 | while (rptr != wptr) { | |
3359 | /* wptr/rptr are in bytes! */ | |
3360 | ring_index = rptr / 4; | |
3361 | src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; | |
3362 | src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; | |
3363 | ||
3364 | switch (src_id) { | |
3365 | case 1: /* D1 vblank/vline */ | |
3366 | switch (src_data) { | |
3367 | case 0: /* D1 vblank */ | |
3368 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { | |
3369 | if (rdev->irq.crtc_vblank_int[0]) { | |
3370 | drm_handle_vblank(rdev->ddev, 0); | |
3371 | rdev->pm.vblank_sync = true; | |
3372 | wake_up(&rdev->irq.vblank_queue); | |
3373 | } | |
3374 | if (rdev->irq.pflip[0]) | |
3375 | radeon_crtc_handle_flip(rdev, 0); | |
3376 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; | |
3377 | DRM_DEBUG("IH: D1 vblank\n"); | |
3378 | } | |
3379 | break; | |
3380 | case 1: /* D1 vline */ | |
3381 | if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { | |
3382 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; | |
3383 | DRM_DEBUG("IH: D1 vline\n"); | |
3384 | } | |
3385 | break; | |
3386 | default: | |
3387 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3388 | break; | |
3389 | } | |
3390 | break; | |
3391 | case 5: /* D2 vblank/vline */ | |
3392 | switch (src_data) { | |
3393 | case 0: /* D2 vblank */ | |
3394 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { | |
3395 | if (rdev->irq.crtc_vblank_int[1]) { | |
3396 | drm_handle_vblank(rdev->ddev, 1); | |
3397 | rdev->pm.vblank_sync = true; | |
3398 | wake_up(&rdev->irq.vblank_queue); | |
3399 | } | |
3400 | if (rdev->irq.pflip[1]) | |
3401 | radeon_crtc_handle_flip(rdev, 1); | |
3402 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; | |
3403 | DRM_DEBUG("IH: D2 vblank\n"); | |
3404 | } | |
3405 | break; | |
3406 | case 1: /* D2 vline */ | |
3407 | if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) { | |
3408 | rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; | |
3409 | DRM_DEBUG("IH: D2 vline\n"); | |
3410 | } | |
3411 | break; | |
3412 | default: | |
3413 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3414 | break; | |
3415 | } | |
3416 | break; | |
3417 | case 19: /* HPD/DAC hotplug */ | |
3418 | switch (src_data) { | |
3419 | case 0: | |
3420 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { | |
3421 | rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; | |
3422 | queue_hotplug = true; | |
3423 | DRM_DEBUG("IH: HPD1\n"); | |
3424 | } | |
3425 | break; | |
3426 | case 1: | |
3427 | if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { | |
3428 | rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; | |
3429 | queue_hotplug = true; | |
3430 | DRM_DEBUG("IH: HPD2\n"); | |
3431 | } | |
3432 | break; | |
3433 | case 4: | |
3434 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { | |
3435 | rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; | |
3436 | queue_hotplug = true; | |
3437 | DRM_DEBUG("IH: HPD3\n"); | |
3438 | } | |
3439 | break; | |
3440 | case 5: | |
3441 | if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { | |
3442 | rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; | |
3443 | queue_hotplug = true; | |
3444 | DRM_DEBUG("IH: HPD4\n"); | |
3445 | } | |
3446 | break; | |
3447 | case 10: | |
3448 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { | |
3449 | rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; | |
3450 | queue_hotplug = true; | |
3451 | DRM_DEBUG("IH: HPD5\n"); | |
3452 | } | |
3453 | break; | |
3454 | case 12: | |
3455 | if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { | |
3456 | rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; | |
3457 | queue_hotplug = true; | |
3458 | DRM_DEBUG("IH: HPD6\n"); | |
3459 | } | |
3460 | break; | |
3461 | default: | |
3462 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3463 | break; | |
3464 | } | |
3465 | break; | |
3466 | case 21: /* HDMI */ | |
3467 | DRM_DEBUG("IH: HDMI: 0x%x\n", src_data); | |
3468 | r600_audio_schedule_polling(rdev); | |
3469 | break; | |
3470 | case 176: /* CP_INT in ring buffer */ | |
3471 | case 177: /* CP_INT in IB1 */ | |
3472 | case 178: /* CP_INT in IB2 */ | |
3473 | DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); | |
3474 | radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); | |
3475 | break; | |
3476 | case 181: /* CP EOP event */ | |
3477 | DRM_DEBUG("IH: CP EOP\n"); | |
3478 | radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); | |
3479 | break; | |
3480 | case 233: /* GUI IDLE */ | |
3481 | DRM_DEBUG("IH: GUI idle\n"); | |
3482 | rdev->pm.gui_idle = true; | |
3483 | wake_up(&rdev->irq.idle_queue); | |
3484 | break; | |
3485 | default: | |
3486 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3487 | break; | |
3488 | } | |
3489 | ||
3490 | /* wptr/rptr are in bytes! */ | |
3491 | rptr += 16; | |
3492 | rptr &= rdev->ih.ptr_mask; | |
3493 | } | |
3494 | /* make sure wptr hasn't changed while processing */ | |
3495 | wptr = r600_get_ih_wptr(rdev); | |
3496 | if (wptr != rdev->ih.wptr) | |
3497 | goto restart_ih; | |
3498 | if (queue_hotplug) | |
3499 | schedule_work(&rdev->hotplug_work); | |
3500 | rdev->ih.rptr = rptr; | |
3501 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | |
3502 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | |
3503 | return IRQ_HANDLED; | |
3504 | } | |
3505 | ||
3506 | /* | |
3507 | * Debugfs info | |
3508 | */ | |
3509 | #if defined(CONFIG_DEBUG_FS) | |
3510 | ||
3511 | static int r600_debugfs_mc_info(struct seq_file *m, void *data) | |
3512 | { | |
3513 | struct drm_info_node *node = (struct drm_info_node *) m->private; | |
3514 | struct drm_device *dev = node->minor->dev; | |
3515 | struct radeon_device *rdev = dev->dev_private; | |
3516 | ||
3517 | DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS); | |
3518 | DREG32_SYS(m, rdev, VM_L2_STATUS); | |
3519 | return 0; | |
3520 | } | |
3521 | ||
3522 | static struct drm_info_list r600_mc_info_list[] = { | |
3523 | {"r600_mc_info", r600_debugfs_mc_info, 0, NULL}, | |
3524 | }; | |
3525 | #endif | |
3526 | ||
3527 | int r600_debugfs_mc_info_init(struct radeon_device *rdev) | |
3528 | { | |
3529 | #if defined(CONFIG_DEBUG_FS) | |
3530 | return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list)); | |
3531 | #else | |
3532 | return 0; | |
3533 | #endif | |
3534 | } | |
3535 | ||
3536 | /** | |
3537 | * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl | |
3538 | * @rdev: radeon device structure | |
3539 | * @bo: buffer object struct which userspace is waiting for idle | |
3540 | * | |
3541 | * Some R6XX/R7XX chips don't seem to take into account an HDP flush | |
3542 | * performed through the ring buffer, which leads to rendering corruption; | |
3543 | * see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we | |
3544 | * perform the HDP flush directly by writing the register through MMIO. | |
3545 | */ | |
3546 | void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |
3547 | { | |
3548 | /* r7xx hw bug: flush HDP with a write to HDP_DEBUG1 followed by an | |
3549 | * fb read rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL. | |
3550 | * That workaround seems to cause problems on some AGP cards, so just | |
3551 | * use the old method for them. | |
3552 | */ | |
3553 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && | |
3554 | rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { | |
3555 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; | |
3556 | u32 tmp; | |
3557 | ||
3558 | WREG32(HDP_DEBUG1, 0); | |
3559 | tmp = readl((void __iomem *)ptr); | |
3560 | } else | |
3561 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | |
3562 | } | |
3563 | ||
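/**
 * r600_set_pcie_lanes - request a new PCIE link width
 * @rdev: radeon device structure
 * @lanes: requested lane count (0, 1, 2, 4, 8, 12 or 16)
 *
 * Does nothing on IGP, non-PCIE and X2 boards, if the link is already
 * at the requested width, or if upconfig is disabled.  Otherwise
 * programs PCIE_LC_LINK_WIDTH_CNTL with the new width, using link
 * renegotiation when the northbridge supports it, and waits for the
 * lane change to complete.
 */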
3564 | void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) | |
3565 | { | |
3566 | u32 link_width_cntl, mask, target_reg; | |
3567 | ||
3568 | if (rdev->flags & RADEON_IS_IGP) | |
3569 | return; | |
3570 | ||
3571 | if (!(rdev->flags & RADEON_IS_PCIE)) | |
3572 | return; | |
3573 | ||
3574 | /* x2 cards have a special sequence */ | |
3575 | if (ASIC_IS_X2(rdev)) | |
3576 | return; | |
3577 | ||
3578 | /* FIXME wait for idle */ | |
3579 | ||
3580 | switch (lanes) { | |
3581 | case 0: | |
3582 | mask = RADEON_PCIE_LC_LINK_WIDTH_X0; | |
3583 | break; | |
3584 | case 1: | |
3585 | mask = RADEON_PCIE_LC_LINK_WIDTH_X1; | |
3586 | break; | |
3587 | case 2: | |
3588 | mask = RADEON_PCIE_LC_LINK_WIDTH_X2; | |
3589 | break; | |
3590 | case 4: | |
3591 | mask = RADEON_PCIE_LC_LINK_WIDTH_X4; | |
3592 | break; | |
3593 | case 8: | |
3594 | mask = RADEON_PCIE_LC_LINK_WIDTH_X8; | |
3595 | break; | |
3596 | case 12: | |
3597 | mask = RADEON_PCIE_LC_LINK_WIDTH_X12; | |
3598 | break; | |
3599 | case 16: | |
3600 | default: | |
3601 | mask = RADEON_PCIE_LC_LINK_WIDTH_X16; | |
3602 | break; | |
3603 | } | |
3604 | ||
3605 | link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | |
3606 | ||
3607 | if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == | |
3608 | (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) | |
3609 | return; | |
3610 | ||
3611 | if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS) | |
3612 | return; | |
3613 | ||
3614 | link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | | |
3615 | RADEON_PCIE_LC_RECONFIG_NOW | | |
3616 | R600_PCIE_LC_RENEGOTIATE_EN | | |
3617 | R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); | |
3618 | link_width_cntl |= mask; | |
3619 | ||
3620 | WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | |
3621 | ||
3622 | /* some northbridges can renegotiate the link rather than requiring | |
3623 | * a complete re-config. | |
3624 | * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.) | |
3625 | */ | |
3626 | if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT) | |
3627 | link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT; | |
3628 | else | |
3629 | link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE; | |
3630 | ||
3631 | WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl | | |
3632 | RADEON_PCIE_LC_RECONFIG_NOW)); | |
3633 | ||
3634 | if (rdev->family >= CHIP_RV770) | |
3635 | target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX; | |
3636 | else | |
3637 | target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX; | |
3638 | ||
3639 | /* wait for lane set to complete */ | |
3640 | link_width_cntl = RREG32(target_reg); | |
3641 | while (link_width_cntl == 0xffffffff) | |
3642 | link_width_cntl = RREG32(target_reg); | |
3643 | ||
3644 | } | |
3645 | ||
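/**
 * r600_get_pcie_lanes - report the current PCIE link width
 * @rdev: radeon device structure
 *
 * Returns the negotiated lane count read back from
 * PCIE_LC_LINK_WIDTH_CNTL, or 0 for IGP, non-PCIE and X2 boards.
 */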
3646 | int r600_get_pcie_lanes(struct radeon_device *rdev) | |
3647 | { | |
3648 | u32 link_width_cntl; | |
3649 | ||
3650 | if (rdev->flags & RADEON_IS_IGP) | |
3651 | return 0; | |
3652 | ||
3653 | if (!(rdev->flags & RADEON_IS_PCIE)) | |
3654 | return 0; | |
3655 | ||
3656 | /* x2 cards have a special sequence */ | |
3657 | if (ASIC_IS_X2(rdev)) | |
3658 | return 0; | |
3659 | ||
3660 | /* FIXME wait for idle */ | |
3661 | ||
3662 | link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | |
3663 | ||
3664 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { | |
3665 | case RADEON_PCIE_LC_LINK_WIDTH_X0: | |
3666 | return 0; | |
3667 | case RADEON_PCIE_LC_LINK_WIDTH_X1: | |
3668 | return 1; | |
3669 | case RADEON_PCIE_LC_LINK_WIDTH_X2: | |
3670 | return 2; | |
3671 | case RADEON_PCIE_LC_LINK_WIDTH_X4: | |
3672 | return 4; | |
3673 | case RADEON_PCIE_LC_LINK_WIDTH_X8: | |
3674 | return 8; | |
3675 | case RADEON_PCIE_LC_LINK_WIDTH_X16: | |
3676 | default: | |
3677 | return 16; | |
3678 | } | |
3679 | } | |
3680 | ||
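/**
 * r600_pcie_gen2_enable - try to bring the PCIE link up to gen2 speed
 * @rdev: radeon device structure
 *
 * Skipped when gen2 is disabled via the radeon_pcie_gen2 option or on
 * IGP, non-PCIE, X2 and R600-class boards.  Advertises upconfig on the
 * 55 nm r6xx parts (RV670/RV620/RV635), and, if the other end of the
 * link supports gen2, programs the speed change controls, writes the
 * gen2 target link speed through the MM config register window and
 * sets the gen2 enable strap.
 */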
3681 | static void r600_pcie_gen2_enable(struct radeon_device *rdev) | |
3682 | { | |
3683 | u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; | |
3684 | u16 link_cntl2; | |
3685 | ||
3686 | if (radeon_pcie_gen2 == 0) | |
3687 | return; | |
3688 | ||
3689 | if (rdev->flags & RADEON_IS_IGP) | |
3690 | return; | |
3691 | ||
3692 | if (!(rdev->flags & RADEON_IS_PCIE)) | |
3693 | return; | |
3694 | ||
3695 | /* x2 cards have a special sequence */ | |
3696 | if (ASIC_IS_X2(rdev)) | |
3697 | return; | |
3698 | ||
3699 | /* only RV6xx+ chips are supported */ | |
3700 | if (rdev->family <= CHIP_R600) | |
3701 | return; | |
3702 | ||
3703 | /* 55 nm r6xx asics */ | |
3704 | if ((rdev->family == CHIP_RV670) || | |
3705 | (rdev->family == CHIP_RV620) || | |
3706 | (rdev->family == CHIP_RV635)) { | |
3707 | /* advertise upconfig capability */ | |
3708 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | |
3709 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | |
3710 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | |
3711 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | |
3712 | if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { | |
3713 | lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; | |
3714 | link_width_cntl &= ~(LC_LINK_WIDTH_MASK | | |
3715 | LC_RECONFIG_ARC_MISSING_ESCAPE); | |
3716 | link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; | |
3717 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | |
3718 | } else { | |
3719 | link_width_cntl |= LC_UPCONFIGURE_DIS; | |
3720 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | |
3721 | } | |
3722 | } | |
3723 | ||
3724 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | |
3725 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && | |
3726 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { | |
3727 | ||
3728 | /* 55 nm r6xx asics */ | |
3729 | if ((rdev->family == CHIP_RV670) || | |
3730 | (rdev->family == CHIP_RV620) || | |
3731 | (rdev->family == CHIP_RV635)) { | |
3732 | WREG32(MM_CFGREGS_CNTL, 0x8); | |
3733 | link_cntl2 = RREG32(0x4088); | |
3734 | WREG32(MM_CFGREGS_CNTL, 0); | |
3735 | /* not supported yet */ | |
3736 | if (link_cntl2 & SELECTABLE_DEEMPHASIS) | |
3737 | return; | |
3738 | } | |
3739 | ||
3740 | speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK; | |
3741 | speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT); | |
3742 | speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; | |
3743 | speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; | |
3744 | speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; | |
3745 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | |
3746 | ||
3747 | tmp = RREG32(0x541c); | |
3748 | WREG32(0x541c, tmp | 0x8); | |
3749 | WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); | |
3750 | link_cntl2 = RREG16(0x4088); | |
3751 | link_cntl2 &= ~TARGET_LINK_SPEED_MASK; | |
3752 | link_cntl2 |= 0x2; | |
3753 | WREG16(0x4088, link_cntl2); | |
3754 | WREG32(MM_CFGREGS_CNTL, 0); | |
3755 | ||
3756 | if ((rdev->family == CHIP_RV670) || | |
3757 | (rdev->family == CHIP_RV620) || | |
3758 | (rdev->family == CHIP_RV635)) { | |
3759 | training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL); | |
3760 | training_cntl &= ~LC_POINT_7_PLUS_EN; | |
3761 | WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl); | |
3762 | } else { | |
3763 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | |
3764 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; | |
3765 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | |
3766 | } | |
3767 | ||
3768 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | |
3769 | speed_cntl |= LC_GEN2_EN_STRAP; | |
3770 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | |
3771 | ||
3772 | } else { | |
3773 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | |
3774 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ | |
3775 | if (1) | |
3776 | link_width_cntl |= LC_UPCONFIGURE_DIS; | |
3777 | else | |
3778 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | |
3779 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | |
3780 | } | |
3781 | } |