#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "r600d.h"
#include "r600_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_5_6_5             0x8
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a
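
/*
 * FMT_* are SQ texture formats and COLOR_* the matching CB color
 * formats; the copy is drawn as 8bpp or 32bpp pixels depending on
 * the transfer's alignment.
 */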

/* emits 23 on rv610-rv670/rs780/rs880, 21 on r600 and rv770+ */
static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;

	h = (h + 7) & ~7;
	if (h < 8)
		h = 8;

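	/* CB pitch is programmed in units of 8 pixels and slice in
	 * units of 64 pixels, both stored minus one
	 */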
	cb_color_info = ((format << 2) | (1 << 27));
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

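	/* rv6xx (and rs780/rs880) parts need a SURFACE_BASE_UPDATE
	 * after changing the CB base
	 */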
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
		radeon_ring_write(rdev, 2 << 0);
	}

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (pitch << 0) | (slice << 10));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, cb_color_info);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);
}

/* emits 5dw */
static void
cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
{
	u32 cp_coher_size;

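	/* CP_COHER_SIZE is expressed in units of 256 bytes */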
	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(rdev, sync_type);
	radeon_ring_write(rdev, cp_coher_size);
	radeon_ring_write(rdev, mc_addr >> 8);
	radeon_ring_write(rdev, 10); /* poll interval */
}

/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
	u64 gpu_addr;
	u32 sq_pgm_resources;

	/* setup shader regs */
	sq_pgm_resources = (1 << 0);

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 2);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

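	/* one SH_ACTION sync from the vs base; 512 bytes should cover
	 * both small programs since ps follows vs in the same buffer
	 * (see r600_blit_init)
	 */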
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}

/* emits 9 + 1 sync (5) = 14 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
	u32 sq_vtx_constant_word2;

	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
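	/* resources are 7 dwords each: 0x460 selects slot 160, the
	 * first VS vertex fetch constant
	 */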
	radeon_ring_write(rdev, 0x460);
	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
	radeon_ring_write(rdev, 48 - 1);
	radeon_ring_write(rdev, sq_vtx_constant_word2);
	radeon_ring_write(rdev, 1 << 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);

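	/* these parts have no vertex cache, so vertex fetches go
	 * through the texture cache
	 */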
	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		cp_set_surface_sync(rdev,
				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(rdev,
				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}

/* emits 9 */
static void
set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr)
{
	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = (1 << 0);
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
				  ((w - 1) << 19));

	sq_tex_resource_word1 = (format << 26);
	sq_tex_resource_word1 |= ((h - 1) << 0);

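	/* word4: identity XYZW destination swizzle (bit 14 is
	 * presumably REQUEST_SIZE)
	 */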
	sq_tex_resource_word4 = ((1 << 14) |
				 (0 << 16) |
				 (1 << 19) |
				 (2 << 22) |
				 (3 << 25));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
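	/* texture resource slot 0 */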
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, sq_tex_resource_word0);
	radeon_ring_write(rdev, sq_tex_resource_word1);
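	/* base address and mip base both point at the same surface */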
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, sq_tex_resource_word4);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
}

/* emits 12 */
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
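	/* (1 << 31) in the generic/window TL words is WINDOW_OFFSET_DISABLE */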
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}

/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, DI_PT_RECTLIST);

	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(rdev, 1);

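	/* three auto-generated indices: a single rectangle */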
	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(rdev, 3);
	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}

/* emits 14 */
static void
set_default_state(struct radeon_device *rdev)
{
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	u64 gpu_addr;
	int dwords;

	switch (rdev->family) {
	case CHIP_R600:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 40;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_gprs = 144;
		num_vs_gprs = 40;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 256;
		num_vs_stack_entries = 256;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 48;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

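	/* leave the vertex cache disabled on parts that have none */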
	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		sq_config = 0;
	else
		sq_config = VC_ENABLE;

	sq_config |= (DX9_CONSTS |
		      ALU_INST_PREFER_VECTOR |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
				  NUM_VS_GPRS(num_vs_gprs) |
				  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
	sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
				  NUM_ES_GPRS(num_es_gprs));
	sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
				   NUM_VS_THREADS(num_vs_threads) |
				   NUM_GS_THREADS(num_gs_threads) |
				   NUM_ES_THREADS(num_es_threads));
	sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
				    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
	sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
				    NUM_ES_STACK_ENTRIES(num_es_stack_entries));

	/* emit an IB pointing at default state */
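	/* the state IB was padded to a multiple of 16 dwords with
	 * type-2 NOPs in r600_blit_init
	 */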
	dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf;
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(rdev, dwords);

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* SQ config */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
	radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_config);
	radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
	radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
	radeon_ring_write(rdev, sq_thread_resource_mgmt);
	radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
	radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
}

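/*
 * Return the IEEE-754 single-precision bit pattern for a small
 * unsigned integer (only the low 14 bits are used), e.g.
 * i2f(1) == 0x3f800000 (1.0f).  The blit vertex coordinates are
 * consumed as floats by the shaders.
 */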
static inline uint32_t i2f(uint32_t input)
{
	u32 result, i, exponent, fraction;

	if ((input & 0x3fff) == 0)
		result = 0; /* 0 is a special case */
	else {
		exponent = 140; /* exponent biased by 127 */
		fraction = (input & 0x3fff) << 10; /* cheat and only
						      handle numbers below 2^15 */
		for (i = 0; i < 14; i++) {
			if (fraction & 0x800000)
				break;
			else {
				fraction = fraction << 1; /* keep
							     shifting left until top bit = 1 */
				exponent = exponent - 1;
			}
		}
		result = exponent << 23 | (fraction & 0x7fffff); /* mask
								    off top bit; assumed 1 */
	}
	return result;
}

int r600_blit_init(struct radeon_device *rdev)
{
	u32 obj_size;
	int r, dwords;
	void *ptr;
	u32 packet2s[16];
	int num_packet2s = 0;

	rdev->r600_blit.state_offset = 0;

	if (rdev->family >= CHIP_RV770)
		rdev->r600_blit.state_len = r7xx_default_size;
	else
		rdev->r600_blit.state_len = r6xx_default_size;

	dwords = rdev->r600_blit.state_len;
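	/* pad the default state to a multiple of 16 dwords with type-2 NOPs */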
	while (dwords & 0xf) {
		packet2s[num_packet2s++] = PACKET2(0);
		dwords++;
	}

	obj_size = dwords * 4;
	obj_size = ALIGN(obj_size, 256);

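	/* SQ_PGM_START_* registers hold the address >> 8, so keep the
	 * shaders 256-byte aligned
	 */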
	rdev->r600_blit.vs_offset = obj_size;
	obj_size += r6xx_vs_size * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.ps_offset = obj_size;
	obj_size += r6xx_ps_size * 4;
	obj_size = ALIGN(obj_size, 256);

	r = radeon_object_create(rdev, NULL, obj_size,
				 true, RADEON_GEM_DOMAIN_VRAM,
				 false, &rdev->r600_blit.shader_obj);
	if (r) {
		DRM_ERROR("r600 failed to allocate shader\n");
		return r;
	}

	DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
		  obj_size,
		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

	r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r) {
		DRM_ERROR("failed to map blit object %d\n", r);
		return r;
	}

	if (rdev->family >= CHIP_RV770)
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r7xx_default_state, rdev->r600_blit.state_len * 4);
	else
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r6xx_default_state, rdev->r600_blit.state_len * 4);
	if (num_packet2s)
		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
			    packet2s, num_packet2s * 4);

	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);

	radeon_object_kunmap(rdev->r600_blit.shader_obj);
	return 0;
}

void r600_blit_fini(struct radeon_device *rdev)
{
	radeon_object_unpin(rdev->r600_blit.shader_obj);
	radeon_object_unref(&rdev->r600_blit.shader_obj);
}

int r600_vb_ib_get(struct radeon_device *rdev)
{
	int r;
	r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
	if (r) {
		DRM_ERROR("failed to get IB for vertex buffer\n");
		return r;
	}

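	/* 64K of vertex data; each blit rectangle consumes 48 bytes */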
	rdev->r600_blit.vb_total = 64*1024;
	rdev->r600_blit.vb_used = 0;
	return 0;
}

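/* fence the vertex buffer IB and queue it for release once the GPU is done */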
void r600_vb_ib_put(struct radeon_device *rdev)
{
	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}

int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
	int r;
	int ring_size, line_size;
	int max_size;
	/* ring dwords needed per copy loop, plus a possible fence emit */
	int dwords_per_loop = 76, num_loops;
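	/* 76 = 9 (tex) + 5 (sync) + 21 (target) + 12 (scissors) +
	 *      14 (vtx) + 10 (draw) + 5 (sync); rv6xx adds 2 below
	 */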

	r = r600_vb_ib_get(rdev);
	WARN_ON(r);

	/* set_render_target emits 2 extra dwords on rv610-rv670/rs780/rs880 */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
		dwords_per_loop += 2;

	/* 8 bpp vs 32 bpp for xfer unit */
	if (size_bytes & 3)
		line_size = 8192;
	else
		line_size = 8192*4;

	max_size = 8192 * line_size;

	/* major loops cover the max size transfer */
	num_loops = ((size_bytes + max_size) / max_size);
	/* minor loops cover the extra non aligned bits */
	num_loops += ((size_bytes % line_size) ? 1 : 0);
	/* total ring dwords needed for all loops */
	ring_size = num_loops * dwords_per_loop;
	ring_size += 40; /* default state (14) + shaders (26) */
	ring_size += 3; /* fence emit for VB IB */
	ring_size += 5; /* done copy */
	ring_size += 3; /* fence emit for done copy */
	r = radeon_ring_lock(rdev, ring_size);
	WARN_ON(r);

	set_default_state(rdev); /* 14 */
	set_shaders(rdev); /* 26 */
	return 0;
}

void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
	int r;

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

	if (rdev->r600_blit.vb_ib)
		r600_vb_ib_put(rdev);

	if (fence)
		r = radeon_fence_emit(rdev, fence);

	radeon_ring_unlock_commit(rdev);
}

void r600_kms_blit_copy(struct radeon_device *rdev,
			u64 src_gpu_addr, u64 dst_gpu_addr,
			int size_bytes)
{
	int max_bytes;
	u64 vb_gpu_addr;
	u32 *vb;

	DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
		  size_bytes, rdev->r600_blit.vb_used);
	vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
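	/* unaligned transfers are done as 8bpp pixels, aligned ones as 32bpp */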
	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
		max_bytes = 8192;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = src_gpu_addr & 255;
			int dst_x = dst_gpu_addr & 255;
			int h = 1;
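			/* surface bases must be 256-byte aligned; carry the
			 * low bits as an x offset into the surface instead
			 */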
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);

#if 0
				r600_vb_ib_put(rdev);

				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
#endif
			}

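			/* three vertices, 4 floats each: destination x/y
			 * position plus source x/y texture coordinate
			 */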
			vb[0] = i2f(dst_x);
			vb[1] = 0;
			vb[2] = i2f(src_x);
			vb[3] = 0;

			vb[4] = i2f(dst_x);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x);
			vb[7] = i2f(h);

			vb[8] = i2f(dst_x + cur_size);
			vb[9] = i2f(h);
			vb[10] = i2f(src_x + cur_size);
			vb[11] = i2f(h);

			/* src 9 */
			set_tex_resource(rdev, FMT_8,
					 src_x + cur_size, h, src_x + cur_size,
					 src_gpu_addr);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst 23 */
			set_render_target(rdev, COLOR_8,
					  dst_x + cur_size, h,
					  dst_gpu_addr);

			/* scissors 12 */
			set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);

			/* 14 */
			vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
			set_vtx_resource(rdev, vb_gpu_addr);

			/* draw 10 */
			draw_auto(rdev);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			vb += 12;
			rdev->r600_blit.vb_used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	} else {
		max_bytes = 8192 * 4;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = (src_gpu_addr & 255);
			int dst_x = (dst_gpu_addr & 255);
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);
			}
#if 0
			if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!rdev->blit_vb)
					return;

				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
			}
#endif

			vb[0] = i2f(dst_x / 4);
			vb[1] = 0;
			vb[2] = i2f(src_x / 4);
			vb[3] = 0;

			vb[4] = i2f(dst_x / 4);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x / 4);
			vb[7] = i2f(h);

			vb[8] = i2f((dst_x + cur_size) / 4);
			vb[9] = i2f(h);
			vb[10] = i2f((src_x + cur_size) / 4);
			vb[11] = i2f(h);

			/* src 9 */
			set_tex_resource(rdev, FMT_8_8_8_8,
					 (src_x + cur_size) / 4,
					 h, (src_x + cur_size) / 4,
					 src_gpu_addr);
			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst 23 */
			set_render_target(rdev, COLOR_8_8_8_8,
					  (dst_x + cur_size) / 4, h,
					  dst_gpu_addr);

			/* scissors 12 */
			set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);

			/* Vertex buffer setup 14 */
			vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
			set_vtx_resource(rdev, vb_gpu_addr);

			/* draw 10 */
			draw_auto(rdev);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			/* 76 ring dwords per loop, 78 on rv610-rv670/rs780/rs880 */
			vb += 12;
			rdev->r600_blit.vb_used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	}
}
