/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
				     int ring, u32 cp_int_cntl);
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}
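
/* Sanity-check the PCIe MAX_READ_REQUEST_SIZE field left behind by the
 * BIOS or OS and reprogram it if it holds an invalid encoding. */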
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int cap, err;

	cap = pci_pcie_cap(rdev->pdev);
	if (!cap)
		return;

	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}
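
/* Busy-wait until the given CRTC enters its vertical blanking period,
 * bounded by rdev->usec_timeout per phase; no-op if the CRTC is off. */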
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	int i;

	if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
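
/* Program the new scanout base for a page flip and return the current
 * GRPH_SURFACE_UPDATE_PENDING status so the caller can poll for completion. */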
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}
int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}
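
/* Populate the static power-management profile table for the Sumo family,
 * mapping each profile to a power state index and clock mode index. */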
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}
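
/* Apply the requested software-controlled voltages (VDDC/VDDCI) for the
 * pending power state change; 0xff01 entries are flags and are skipped. */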
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
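
/* Hot-plug detect (HPD) handling: sense, interrupt polarity, setup and
 * teardown for the six HPD pads. */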
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}
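
/* Enable the HPD pins and interrupt sources for every connector that has
 * a hot-plug pad wired up; evergreen_hpd_fini() reverses this. */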
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}
/* watermark setup */
static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;

	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}
struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
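
/* The helpers below derive bandwidth figures from the watermark parameters
 * using 20.12 fixed-point arithmetic (fixed20_12 / dfixed_* helpers). */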
static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
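
/* Fold memory-controller, cursor and display-pipe latencies into a single
 * latency watermark in ns, extended by line fill time when the line buffer
 * cannot be refilled within the active display period. */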
static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}
static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}
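
/* Recompute the line buffer split and watermarks for every CRTC pair. */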
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
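
/* GART/VM plumbing: TLB flush, enable/disable and teardown. */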
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}
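
/* Save display controller state and blank all CRTCs so the memory
 * controller can be reprogrammed safely; evergreen_mc_resume() undoes it. */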
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
		save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
		save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
	}
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);

	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
		WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
		WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		       rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		       rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	if (rdev->flags & RADEON_IS_IGP) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
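
/* Emit an indirect buffer on the gfx ring, switching the CP into
 * DX10/11 fetch mode first. */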
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);
	/* FIXME: implement */
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}
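
/* Upload the PFP and ME (PM4) microcode images to the command processor. */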
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
static int evergreen_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;
	u32 cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(ring, evergreen_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}
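
/* Reset the CP, program the ring buffer and writeback addresses, then
 * start the CP and verify the ring with a ring test. */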
int evergreen_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	evergreen_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}
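
/* Build the tile-pipe to render-backend swizzle map consumed while
 * programming the backend mapping during GPU init. */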
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_BARTS:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}
1702 static void evergreen_gpu_init(struct radeon_device
*rdev
)
1704 u32 cc_rb_backend_disable
= 0;
1705 u32 cc_gc_shader_pipe_config
;
1706 u32 gb_addr_config
= 0;
1707 u32 mc_shared_chmap
, mc_arb_ramcfg
;
1713 u32 sq_lds_resource_mgmt
;
1714 u32 sq_gpr_resource_mgmt_1
;
1715 u32 sq_gpr_resource_mgmt_2
;
1716 u32 sq_gpr_resource_mgmt_3
;
1717 u32 sq_thread_resource_mgmt
;
1718 u32 sq_thread_resource_mgmt_2
;
1719 u32 sq_stack_resource_mgmt_1
;
1720 u32 sq_stack_resource_mgmt_2
;
1721 u32 sq_stack_resource_mgmt_3
;
1722 u32 vgt_cache_invalidation
;
1723 u32 hdp_host_path_cntl
, tmp
;
1724 int i
, j
, num_shader_engines
, ps_thread_count
;
	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		if (rdev->pdev->device == 0x9648)
			rdev->config.evergreen.max_simds = 3;
		else if ((rdev->pdev->device == 0x9647) ||
			 (rdev->pdev->device == 0x964a))
			rdev->config.evergreen.max_simds = 4;
		else
			rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO2:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);
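
	/* the INACTIVE_* and BACKEND_DISABLE fields below are built by
	 * shifting the all-ones mask left by the number of usable units,
	 * so only the bits for units beyond max_* remain set.
	 */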
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;

	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
				  & EVERGREEN_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
			       & EVERGREEN_MAX_SIMDS_MASK);

	cc_rb_backend_disable =
		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
				& EVERGREEN_MAX_BACKENDS_MASK);
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	if (rdev->flags & RADEON_IS_IGP)
		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
	else
		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);

	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_config |= ROW_SIZE(2);
	else
		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);

	if (rdev->ddev->pdev->device == 0x689e) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;
		u8 efuse_box_bit_131_124;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));

		switch (efuse_box_bit_131_124) {
		case 0x00:
			gb_backend_map = 0x76543210;
			break;
		case 0x55:
			gb_backend_map = 0x77553311;
			break;
		case 0x56:
			gb_backend_map = 0x77553300;
			break;
		case 0x59:
			gb_backend_map = 0x77552211;
			break;
		case 0x66:
			gb_backend_map = 0x77443300;
			break;
		case 0x99:
			gb_backend_map = 0x66552211;
			break;
		case 0x5a:
			gb_backend_map = 0x77552200;
			break;
		case 0xaa:
			gb_backend_map = 0x66442200;
			break;
		case 0x95:
			gb_backend_map = 0x66553311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else if (rdev->ddev->pdev->device == 0x68b9) {
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);

		switch (efuse_box_bit_127_124) {
		case 0x0:
			gb_backend_map = 0x00003210;
			break;
		case 0x5:
		case 0x6:
		case 0x9:
		case 0xa:
			gb_backend_map = 0x00003311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else {
		switch (rdev->family) {
		case CHIP_CYPRESS:
		case CHIP_HEMLOCK:
		case CHIP_BARTS:
			gb_backend_map = 0x66442200;
			break;
		case CHIP_JUNIPER:
			gb_backend_map = 0x00002200;
			break;
		default:
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
		}
	}
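
	/* gb_backend_map now packs, one nibble per tile pipe, which render
	 * backend services that pipe: taken from efuse straps on fused
	 * boards, from a per-family table or the computed default otherwise.
	 */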
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.evergreen.tile_config |= 1 << 4;
	else
		rdev->config.evergreen.tile_config |=
			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;
	rdev->config.evergreen.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
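
	/* program each shader engine in turn via SE_INDEX, then restore
	 * broadcast writes so later register writes reach every SE.
	 */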
	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;

	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
		u32 sp = cc_gc_shader_pipe_config;
		u32 gfx = grbm_gfx_index | SE_INDEX(i);

		if (i == num_shader_engines) {
			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
		}

		WREG32(GRBM_GFX_INDEX, gfx);
		WREG32(RLC_GFX_INDEX, gfx);

		WREG32(CC_RB_BACKEND_DISABLE, rb);
		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
	}

	grbm_gfx_index |= SE_BROADCAST_WRITES;
	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
	WREG32(RLC_GFX_INDEX, grbm_gfx_index);
	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);
	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));
	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}
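
	/* carve up the SQ resources: the 4 * 2 clause temporaries come off
	 * the top of the GPR pool and the remainder is split
	 * PS:VS:GS:ES:HS:LS = 12:6:4:4:3:3 (out of 32).
	 */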
	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
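
	/* with max_gprs = 256 this works out to 93 PS, 46 VS, 31 GS,
	 * 31 ES, 23 HS and 23 LS GPRs, i.e. 247 of the 248 shareable ones.
	 */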
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}
	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
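
	/* the other stages share what is left of max_threads after the PS
	 * allocation, split evenly (/6) and rounded down to a multiple of
	 * 8: with the defaults, 248 - 128 gives 20 per stage, rounded to 16.
	 */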
	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);
	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);
	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);
	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	if (rdev->flags & RADEON_IS_IGP)
		tmp = RREG32(FUS_MC_ARB_RAMCFG);
	else
		tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
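	/* e.g. two 64-bit channels report an effective 128-bit bus */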
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	if (rdev->flags & RADEON_IS_IGP) {
		/* size in bytes on fusion */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	} else {
		/* size in MB on evergreen */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	}
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, ring);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, ring, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_unlock_commit(rdev, ring);
	}
	ring->rptr = RREG32(CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}

static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int evergreen_asic_reset(struct radeon_device *rdev)
{
	return evergreen_gpu_soft_reset(rdev);
}

u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	switch (crtc) {
	case 0:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
	case 1:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
	case 2:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
	case 3:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
	case 4:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
	case 5:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
	default:
		return 0;
	}
}

void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0,
					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
		cayman_cp_int_cntl_setup(rdev, 1, 0);
		cayman_cp_int_cntl_setup(rdev, 2, 0);
	} else
		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
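
/* enable the interrupt sources that currently have users (CP rings,
 * vblank, pageflip, hotplug, GUI idle) and mask everything else.
 */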
int evergreen_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		evergreen_disable_interrupt_state(rdev);
		return 0;
	}

	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	if (rdev->family >= CHIP_CAYMAN) {
		/* enable CP interrupts on all rings */
		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
		}
		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
		}
	} else {
		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= RB_INT_ENABLE;
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    rdev->irq.pflip[2]) {
		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    rdev->irq.pflip[3]) {
		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    rdev->irq.pflip[4]) {
		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    rdev->irq.pflip[5]) {
		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
	} else
		WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	return 0;
}
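
/* latch the pending display interrupt status into stat_regs and write
 * the ack bits back so the sources can fire again.
 */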
static void evergreen_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->num_crtc >= 6) {
		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}

void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}

void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
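
/* each IH ring vector is 16 bytes: dword 0 holds the source id and
 * dword 1 the source-specific data; the loop below consumes vectors
 * until rptr catches up with wptr.
 */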
int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);
	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
restart_ih:
	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	evergreen_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[2])
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[3])
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[4])
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[5])
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			if (rdev->family >= CHIP_CAYMAN) {
				switch (src_data) {
				case 0:
					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
					break;
				case 1:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
					break;
				case 2:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
					break;
				}
			} else
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
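
/* bring the ASIC up: load microcode, program the MC, GART and gfx
 * state, then start the CP ring, interrupts and the IB pool.
 */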
static int evergreen_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		rdev->accel_working = false;
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed !\n");
	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back into
	 * a good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

int evergreen_suspend(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	radeon_ib_pool_suspend(rdev);
	r600_blit_suspend(rdev);
	r700_cp_stop(rdev);
	ring->ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does little more than call the
 * asic-specific functions. This should also allow us to remove a bunch
 * of callback functions from each asic.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed !\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}

	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}

void evergreen_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_semaphore_driver_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}