/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
/* get temperature in millidegrees */
u32 evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	u32 actual_temp = 0;

	if ((temp >> 10) & 1)
		actual_temp = 0;
	else if ((temp >> 9) & 1)
		actual_temp = 255;
	else
		actual_temp = (temp >> 1) & 0xff;

	return actual_temp * 1000;
}
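
/*
 * Worked example (illustrative): in the decoding above, bit 10 of the
 * ASIC_T field flags a zero reading, bit 9 clamps the value to 255, and
 * otherwise bits 8:1 hold the temperature in degrees C.  A raw field of
 * 0x90 decodes as (0x90 >> 1) & 0xff = 72, returned as 72000 millidegrees.
 */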
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
		}
	}
}
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	/* XXX */
}
static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
			return;
		}
		if (tmp)
			return;
		udelay(1);
	}
}
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
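
/*
 * Illustrative note: the VM context registers programmed above take 4KB
 * page frame numbers, hence the ">> 12" shifts.  For example, a gtt_start
 * of 0x10000000 is written as 0x10000000 >> 12 = 0x10000.
 */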
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
	save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
	save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
	save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
	save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
	save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
	save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);

	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);

	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	WREG32(EVERGREEN_D3VGA_CONTROL, 0);
	WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	WREG32(EVERGREEN_D5VGA_CONTROL, 0);
	WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);

	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
	WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
static void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
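
	/*
	 * Worked example (illustrative): MC_VM_FB_LOCATION packs the VRAM
	 * base and top as 16-bit fields in 16MB (1 << 24) units.  With
	 * vram_start = 0x0 and vram_end = 0x3fffffff (1GB), tmp becomes
	 * (0x3f << 16) | 0x0 = 0x003f0000.
	 */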
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
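
/*
 * Illustrative note: the PFP and ME microcode images are stored as
 * big-endian 32-bit words, so each word is converted with be32_to_cpup()
 * before being written; the UCODE_ADDR/RAM_WADDR registers are reset to 0
 * around the loops because the data registers advance the upload address
 * on each write (an assumption about the hardware, consistent with the
 * code above).
 */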
static int evergreen_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, 4);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	/* init some VGT regs */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(rdev, 0xe);
	radeon_ring_write(rdev, 0x10);
	radeon_ring_unlock_commit(rdev);

	return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE / 8) << 8) | rb_bufsz;
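
	/*
	 * Illustrative arithmetic: drm_order() is a log2 helper, so a 1MB
	 * ring gives rb_bufsz = drm_order(0x100000 / 8) = 17, and the
	 * page-size term is drm_order(4096 / 8) = 9.
	 */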
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	evergreen_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}
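
/*
 * Worked example for the mapping above (illustrative): with four pipes,
 * two enabled backends (mask 0x3) and an identity swizzle, the loop packs
 * one backend nibble per pipe, giving backend_map = 0x1010 (backends
 * 0,1,0,1 for pipes 0-3).
 */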
static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map = 0;
	u32 grbm_gfx_index;
	u32 sq_config;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl;
	int i, j, num_shader_engines, ps_thread_count;
	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;

	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
				  & EVERGREEN_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
			       & EVERGREEN_MAX_SIMDS_MASK);

	cc_rb_backend_disable =
		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
				& EVERGREEN_MAX_BACKENDS_MASK);
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);

	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_config |= ROW_SIZE(2);
	else
		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
	if (rdev->ddev->pdev->device == 0x689e) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;
		u8 efuse_box_bit_131_124;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
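
		/*
		 * Worked example (illustrative): efuse bits 131:124 combine
		 * the low nibble of strap word 4 with the top nibble of strap
		 * word 3; efuse_straps_4 = 0x5 and efuse_straps_3 = 0x50000000
		 * yield (0x5 << 4) | 0x5 = 0x55.
		 */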
		switch (efuse_box_bit_131_124) {
		/* one fused configuration per case; the selector values are
		 * elided in this excerpt */
			gb_backend_map = 0x76543210;
			gb_backend_map = 0x77553311;
			gb_backend_map = 0x77553300;
			gb_backend_map = 0x77552211;
			gb_backend_map = 0x77443300;
			gb_backend_map = 0x66552211;
			gb_backend_map = 0x77552200;
			gb_backend_map = 0x66442200;
			gb_backend_map = 0x66553311;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else if (rdev->ddev->pdev->device == 0x68b9) {
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);

		switch (efuse_box_bit_127_124) {
		/* selector values elided in this excerpt */
			gb_backend_map = 0x00003210;
			gb_backend_map = 0x00003311;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else {
		switch (rdev->family) {
		case CHIP_CYPRESS:
		case CHIP_HEMLOCK:
			gb_backend_map = 0x66442200;
			break;
		case CHIP_JUNIPER:
			gb_backend_map = 0x00006420;
			break;
		default:
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
		}
	}
	rdev->config.evergreen.tile_config = gb_addr_config;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;

	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
		u32 sp = cc_gc_shader_pipe_config;
		u32 gfx = grbm_gfx_index | SE_INDEX(i);

		if (i == num_shader_engines) {
			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
		}

		WREG32(GRBM_GFX_INDEX, gfx);
		WREG32(RLC_GFX_INDEX, gfx);

		WREG32(CC_RB_BACKEND_DISABLE, rb);
		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
	}

	grbm_gfx_index |= SE_BROADCAST_WRITES;
	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
	WREG32(RLC_GFX_INDEX, grbm_gfx_index);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);
	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));
	if (rdev->family == CHIP_CEDAR)
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);

	if (rdev->family == CHIP_CEDAR)
		ps_thread_count = 96;
	else
		ps_thread_count = 128;

	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
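
	/*
	 * Worked example (illustrative): on Cedar, max_threads = 192 and
	 * ps_thread_count = 96, so each remaining stage is granted
	 * ((192 - 96) / 6 / 8) * 8 = 16 threads, rounded down to a multiple
	 * of eight.
	 */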
	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	if (rdev->family == CHIP_CEDAR)
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
	else
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
}
int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	/* size in MB on evergreen */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
	/* FIXME: implement for evergreen */
	return false;
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;
	u32 srbm_reset = 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);

	/* reset all the system blocks */
	srbm_reset = SRBM_SOFT_RESET_ALL_MASK;

	dev_info(rdev->dev, "  SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);
	(void)RREG32(SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	/* After reset we need to reinit the asic as GPU often end up in an
	 * inconsistent state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	evergreen_mc_resume(rdev, &save);
	return 0;
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
	return evergreen_gpu_soft_reset(rdev);
}
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	switch (crtc) {
	case 0:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
	case 1:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
	case 2:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
	case 3:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
	case 4:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
	case 5:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
	default:
		return 0;
	}
}
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);

	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
int evergreen_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		evergreen_disable_interrupt_state(rdev);
		return 0;
	}

	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	if (rdev->irq.sw_int) {
		DRM_DEBUG("evergreen_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2]) {
		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3]) {
		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4]) {
		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5]) {
		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	return 0;
}
static inline void evergreen_irq_ack(struct radeon_device *rdev,
				     u32 *disp_int,
				     u32 *disp_int_cont,
				     u32 *disp_int_cont2,
				     u32 *disp_int_cont3,
				     u32 *disp_int_cont4,
				     u32 *disp_int_cont5)
{
	u32 tmp;

	*disp_int = RREG32(DISP_INTERRUPT_STATUS);
	*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	*disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	*disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	*disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	*disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);

	if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
	if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);

	if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
	if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);

	if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
	if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);

	if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
	if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);

	if (*disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (*disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
void evergreen_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;
	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
	evergreen_disable_interrupt_state(rdev);
}

static void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}
static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	/* XXX use writeback */
	wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16).  Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
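
/*
 * Illustrative note: each IH vector is 16 bytes, which is why the overflow
 * path above skips ahead by 16; masking with ptr_mask wraps the pointer,
 * e.g. with a 64KB ring (ptr_mask = 0xffff) a raw wptr of 0x10010 wraps
 * to 0x0010.
 */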
1780 int evergreen_irq_process(struct radeon_device
*rdev
)
1782 u32 wptr
= evergreen_get_ih_wptr(rdev
);
1783 u32 rptr
= rdev
->ih
.rptr
;
1784 u32 src_id
, src_data
;
1786 u32 disp_int
, disp_int_cont
, disp_int_cont2
;
1787 u32 disp_int_cont3
, disp_int_cont4
, disp_int_cont5
;
1788 unsigned long flags
;
1789 bool queue_hotplug
= false;
1791 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr
, wptr
);
1792 if (!rdev
->ih
.enabled
)
1795 spin_lock_irqsave(&rdev
->ih
.lock
, flags
);
1798 spin_unlock_irqrestore(&rdev
->ih
.lock
, flags
);
1801 if (rdev
->shutdown
) {
1802 spin_unlock_irqrestore(&rdev
->ih
.lock
, flags
);
1807 /* display interrupts */
1808 evergreen_irq_ack(rdev
, &disp_int
, &disp_int_cont
, &disp_int_cont2
,
1809 &disp_int_cont3
, &disp_int_cont4
, &disp_int_cont5
);
1811 rdev
->ih
.wptr
= wptr
;
1812 while (rptr
!= wptr
) {
1813 /* wptr/rptr are in bytes! */
1814 ring_index
= rptr
/ 4;
1815 src_id
= rdev
->ih
.ring
[ring_index
] & 0xff;
1816 src_data
= rdev
->ih
.ring
[ring_index
+ 1] & 0xfffffff;
1819 case 1: /* D1 vblank/vline */
1821 case 0: /* D1 vblank */
1822 if (disp_int
& LB_D1_VBLANK_INTERRUPT
) {
1823 drm_handle_vblank(rdev
->ddev
, 0);
1824 wake_up(&rdev
->irq
.vblank_queue
);
1825 disp_int
&= ~LB_D1_VBLANK_INTERRUPT
;
1826 DRM_DEBUG("IH: D1 vblank\n");
1829 case 1: /* D1 vline */
1830 if (disp_int
& LB_D1_VLINE_INTERRUPT
) {
1831 disp_int
&= ~LB_D1_VLINE_INTERRUPT
;
1832 DRM_DEBUG("IH: D1 vline\n");
1836 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id
, src_data
);
1840 case 2: /* D2 vblank/vline */
1842 case 0: /* D2 vblank */
1843 if (disp_int_cont
& LB_D2_VBLANK_INTERRUPT
) {
1844 drm_handle_vblank(rdev
->ddev
, 1);
1845 wake_up(&rdev
->irq
.vblank_queue
);
1846 disp_int_cont
&= ~LB_D2_VBLANK_INTERRUPT
;
1847 DRM_DEBUG("IH: D2 vblank\n");
1850 case 1: /* D2 vline */
1851 if (disp_int_cont
& LB_D2_VLINE_INTERRUPT
) {
1852 disp_int_cont
&= ~LB_D2_VLINE_INTERRUPT
;
1853 DRM_DEBUG("IH: D2 vline\n");
1857 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id
, src_data
);
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 2);
					wake_up(&rdev->irq.vblank_queue);
					disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 3);
					wake_up(&rdev->irq.vblank_queue);
					disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 4);
					wake_up(&rdev->irq.vblank_queue);
					disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 5);
					wake_up(&rdev->irq.vblank_queue);
					disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (disp_int & DC_HPD1_INTERRUPT) {
					disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (disp_int_cont & DC_HPD2_INTERRUPT) {
					disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
					disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
					disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
					disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
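
/* Bring the chip up: load microcode if needed, program the memory
 * controller, enable GART (or AGP), initialize the GPU, then bring up the
 * blitter, IH, CP and writeback in that order.  Any failure is returned
 * to the caller, which is expected to tear things back down.
 */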
static int evergreen_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	if (!rdev->r600_blit.shader_obj) {
		r = r600_blit_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed blitter (%d).\n", r);
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->r600_blit.shader_gpu_addr);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	if (r) {
		DRM_ERROR("failed to pin blit object %d\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffers are not vital so don't worry about failure */
	r600_wb_enable(rdev);

	return 0;
}
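
/* Called on resume from suspend: re-post the card via the ATOM init table,
 * then restart the engines through the normal startup path.
 */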
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	return r;
}
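
/* Suspend is roughly the inverse of startup: quiesce the CP, disable
 * interrupts, writeback and GART, and unpin the blit shader object.
 */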
int evergreen_suspend(struct radeon_device *rdev)
{
	int r;

	/* FIXME: we should wait for the ring to be empty */
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	evergreen_irq_suspend(rdev);
	r600_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	/* unpin shader bo */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}

	return 0;
}
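
/* Heuristic POST check: if any CRTC has its master enable bit set, or the
 * memory controller reports a non-zero MEM_SIZE, the card has already been
 * initialized by the vBIOS.
 */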
static bool evergreen_card_posted(struct radeon_device *rdev)
{
	u32 reg;

	/* first check CRTCs */
	reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	if (reg & EVERGREEN_CRTC_MASTER_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does pretty much nothing more than
 * call the asic-specific functions. This should also allow us to remove a
 * bunch of callback functions like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!evergreen_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_wb_fini(rdev);
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}
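
/* Teardown mirrors evergreen_init in reverse order. */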
void evergreen_fini(struct radeon_device *rdev)
{
	/*r600_blit_fini(rdev);*/
	r700_cp_fini(rdev);
	r600_wb_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}