/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);

/*
 * R600 PCIE GART
 */
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	u64 pte;

	if (i < 0 || i > rdev->gart.num_gpu_pages)
		return -EINVAL;
	pte = 0;
	writeq(pte, ((void __iomem *)ptr) + (i * 8));
	return 0;
}
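
/* Each GART page-table entry is 8 bytes (the table is sized as
 * num_gpu_pages * 8 in r600_pcie_gart_init() below), so entry i lives at
 * byte offset i * 8 and writeq() clears the whole 64-bit PTE in one
 * access. */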

void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the invalidation request's response type */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
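
/* In the poll loop above, a response type of 2 signals a failed
 * invalidation (hence the warning), while any other non-zero response
 * type means the TLB flush completed; timing out falls through without
 * complaint. */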
110
4aac0473 111int r600_pcie_gart_init(struct radeon_device *rdev)
3ce0a23d 112{
4aac0473 113 int r;
3ce0a23d 114
4aac0473
JG
115 if (rdev->gart.table.vram.robj) {
116 WARN(1, "R600 PCIE GART already initialized.\n");
117 return 0;
118 }
3ce0a23d
JG
119 /* Initialize common gart structure */
120 r = radeon_gart_init(rdev);
4aac0473 121 if (r)
3ce0a23d 122 return r;
3ce0a23d 123 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
4aac0473
JG
124 return radeon_gart_table_vram_alloc(rdev);
125}

int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
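
/* The VM_CONTEXT0 start/end/base addresses above are programmed in 4KB
 * page units (hence the >> 12 shifts); accesses that fault outside the
 * range are redirected to the dummy page installed as the protection
 * fault default address. */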

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read SRBM_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
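
/* The 0x3F00 mask presumably selects the memory-controller busy bits of
 * SRBM_STATUS; the wait succeeds once they have all cleared and gives up
 * after rdev->usec_timeout microseconds. */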

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
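
/* Worked example of the MC_VM_FB_LOCATION packing above: with VRAM at
 * offset 0 and 256MB of VRAM (vram_end = 0x0FFFFFFF), the register value
 * is ((0x0FFFFFFF >> 24) & 0xFFFF) << 16 | 0 = 0x000F0000, i.e. the start
 * and end addresses in 16MB units packed into two 16-bit fields. */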

int r600_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize, numchan;
	int r;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could the aperture size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;

	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;

	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			return r;
		/* gtt_size is setup by radeon_agp_init */
		rdev->mc.gtt_location = rdev->mc.agp_base;
		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
		/* Try to put vram before or after AGP because we
		 * want SYSTEM_APERTURE to cover both VRAM and
		 * AGP so that the GPU can catch out of VRAM/AGP accesses
		 */
		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
			/* Enough place before */
			rdev->mc.vram_location = rdev->mc.gtt_location -
							rdev->mc.mc_vram_size;
		} else if (tmp > rdev->mc.mc_vram_size) {
			/* Enough place after */
			rdev->mc.vram_location = rdev->mc.gtt_location +
							rdev->mc.gtt_size;
		} else {
			/* Try to setup VRAM then AGP; this might not
			 * work on some cards
			 */
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		}
	} else {
		rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
							0xFFFF) << 24;
		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			/* Enough place after vram */
			rdev->mc.gtt_location = tmp;
		} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
			/* Enough place before vram */
			rdev->mc.gtt_location = 0;
		} else {
			/* Not enough place after or before; shrink
			 * gart size
			 */
			if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
				rdev->mc.gtt_location = 0;
				rdev->mc.gtt_size = rdev->mc.vram_location;
			} else {
				rdev->mc.gtt_location = tmp;
				rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
			}
		}
		rdev->mc.gtt_location = rdev->mc.mc_vram_size;
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	return 0;
}
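
/* The fixed-point tail of r600_mc_init() computes
 * sclk = default_sclk / 100 in 20.12 fixed point; default_sclk
 * presumably comes from the BIOS in 10 kHz units, making the result the
 * engine clock in MHz. */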

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset, and it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
	dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* After reset we need to reinit the asic as the GPU often ends up in
	 * an incoherent state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
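
/* Example: with 4 tile pipes, 4 backends and nothing disabled, the
 * swizzle table is {0, 1, 2, 3} and the map is
 * (0 << 0) | (1 << 2) | (2 << 4) | (3 << 6) = 0xE4, the identity
 * mapping of backends to pipes. */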

int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
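
/* r600_count_pipe_bits() is a plain 32-bit population count, equivalent
 * to the kernel's hweight32(). */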

void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
						rdev->config.r600.max_backends,
						(0xff << rdev->config.r600.max_backends) & 0xff);
	tiling_config |= BACKEND_MAP(tmp);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
	WREG32(CC_RB_BACKEND_DISABLE, tmp);

	/* Setup pipes */
	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);

	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if (rdev->family > CHIP_R600)
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if ((rdev->family == CHIP_R600) ||
	    (rdev->family == CHIP_RV630) ||
	    (rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if ((rdev->family == CHIP_R600) ||
		   (rdev->family == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if (rdev->family == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if ((rdev->family == CHIP_RV610) ||
		   (rdev->family == CHIP_RV620) ||
		   (rdev->family == CHIP_RS780) ||
		   (rdev->family == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family == CHIP_RV630) ||
		   (rdev->family == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (rdev->family == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}


/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

void r600_hdp_flush(struct radeon_device *rdev)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
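
/* PCIE_PORT_INDEX/PCIE_PORT_DATA form the usual index/data pair for
 * indirect register access; the discarded reads after each write post
 * the access before the next one is issued. */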

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

int r600_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	size_t pfp_req_size, me_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600: chip_name = "R600"; break;
	case CHIP_RV610: chip_name = "RV610"; break;
	case CHIP_RV630: chip_name = "RV630"; break;
	case CHIP_RV620: chip_name = "RV620"; break;
	case CHIP_RV635: chip_name = "RV635"; break;
	case CHIP_RV670: chip_name = "RV670"; break;
	case CHIP_RS780:
	case CHIP_RS880: chip_name = "RS780"; break;
	case CHIP_RV770: chip_name = "RV770"; break;
	case CHIP_RV730:
	case CHIP_RV740: chip_name = "RV730"; break;
	case CHIP_RV710: chip_name = "RV710"; break;
	default: BUG();
	}

	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
	}

	DRM_INFO("Loading %s CP Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}
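
/* Firmware size sanity check, worked through for an R600: pfp_req_size
 * is 576 * 4 = 2304 bytes, while me_req_size is PM4_UCODE_SIZE * 12
 * bytes because each PM4 microcode record is three 32-bit words,
 * matching the PM4_UCODE_SIZE * 3 dword loop in
 * r600_cp_load_microcode() below. */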

static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
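
/* Ring sizing example: a 1024 * 1024 byte request gives
 * rb_bufsz = drm_order(131072) = 17 and (1 << 18) * 4 = 1MB again, so
 * power-of-two sizes pass through unchanged; align_mask = 15 pads ring
 * writes to 16-dword boundaries. */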


/*
 * GPU scratch registers helper functions
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}

int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r600_wb_disable(struct radeon_device *rdev)
{
	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
	}
}

void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
			r600_wb_fini(rdev);
			return r;
		}
	}
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}
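
/* Write-back buffer layout: SCRATCH_ADDR takes the GPU address in
 * 256-byte units (hence the >> 8), the CP read-pointer shadow lives at
 * offset 1024 within the same page, and SCRATCH_UMSK = 0xff enables
 * write-back for the scratch registers. */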

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
}

int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	/* FIXME: implement */
	return 0;
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	return 0;
}

int r600_irq_process(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

int r600_irq_set(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}


bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			      &rdev->r600_blit.shader_gpu_addr);
	if (r) {
		DRM_ERROR("failed to pin blit object %d\n", r);
		return r;
	}

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write-back buffer is not vital, so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}
	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	radeon_object_unpin(rdev->r600_blit.shader_obj);
	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than calling asic specific functions. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_object_init(rdev);
	if (r)
		return r;
	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	if (!rdev->me_fw || !rdev->pfp_fw) {
		r = r600_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_blit_init(rdev);
	if (r) {
		DRM_ERROR("radeon: failed blitter (%d).\n", r);
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		r600_suspend(rdev);
		r600_wb_fini(rdev);
		radeon_ring_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	/* Suspend operations */
	r600_suspend(rdev);

	r600_blit_fini(rdev);
	radeon_ring_fini(rdev);
	r600_wb_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	if (rdev->flags & RADEON_IS_AGP)
		radeon_agp_fini(rdev);
	radeon_object_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}


/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(CP_RB_RPTR);
	wdp = RREG32(CP_RB_WPTR);
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}