drivers/gpu/drm/radeon/evergreen_cs.c
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon.h"
30#include "evergreend.h"
31#include "evergreen_reg_safe.h"
 32#include "cayman_reg_safe.h"
33
34static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
35 struct radeon_cs_reloc **cs_reloc);
36
37struct evergreen_cs_track {
38 u32 group_size;
39 u32 nbanks;
40 u32 npipes;
 41 /* values we track */
42 u32 nsamples;
43 u32 cb_color_base_last[12];
44 struct radeon_bo *cb_color_bo[12];
45 u32 cb_color_bo_offset[12];
46 struct radeon_bo *cb_color_fmask_bo[8];
47 struct radeon_bo *cb_color_cmask_bo[8];
48 u32 cb_color_info[12];
49 u32 cb_color_view[12];
50 u32 cb_color_pitch_idx[12];
51 u32 cb_color_slice_idx[12];
52 u32 cb_color_dim_idx[12];
53 u32 cb_color_dim[12];
54 u32 cb_color_pitch[12];
55 u32 cb_color_slice[12];
56 u32 cb_color_cmask_slice[8];
57 u32 cb_color_fmask_slice[8];
58 u32 cb_target_mask;
59 u32 cb_shader_mask;
60 u32 vgt_strmout_config;
61 u32 vgt_strmout_buffer_config;
62 u32 db_depth_control;
63 u32 db_depth_view;
64 u32 db_depth_size;
65 u32 db_depth_size_idx;
66 u32 db_z_info;
67 u32 db_z_idx;
68 u32 db_z_read_offset;
69 u32 db_z_write_offset;
70 struct radeon_bo *db_z_read_bo;
71 struct radeon_bo *db_z_write_bo;
72 u32 db_s_info;
73 u32 db_s_idx;
74 u32 db_s_read_offset;
75 u32 db_s_write_offset;
76 struct radeon_bo *db_s_read_bo;
77 struct radeon_bo *db_s_write_bo;
78};
79
80static void evergreen_cs_track_init(struct evergreen_cs_track *track)
81{
82 int i;
83
84 for (i = 0; i < 8; i++) {
85 track->cb_color_fmask_bo[i] = NULL;
86 track->cb_color_cmask_bo[i] = NULL;
87 track->cb_color_cmask_slice[i] = 0;
88 track->cb_color_fmask_slice[i] = 0;
89 }
90
91 for (i = 0; i < 12; i++) {
92 track->cb_color_base_last[i] = 0;
93 track->cb_color_bo[i] = NULL;
94 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
95 track->cb_color_info[i] = 0;
96 track->cb_color_view[i] = 0;
97 track->cb_color_pitch_idx[i] = 0;
98 track->cb_color_slice_idx[i] = 0;
99 track->cb_color_dim[i] = 0;
100 track->cb_color_pitch[i] = 0;
101 track->cb_color_slice[i] = 0;
 102 track->cb_color_dim_idx[i] = 0;
103 }
104 track->cb_target_mask = 0xFFFFFFFF;
105 track->cb_shader_mask = 0xFFFFFFFF;
106
107 track->db_depth_view = 0xFFFFC000;
108 track->db_depth_size = 0xFFFFFFFF;
109 track->db_depth_size_idx = 0;
110 track->db_depth_control = 0xFFFFFFFF;
111 track->db_z_info = 0xFFFFFFFF;
112 track->db_z_idx = 0xFFFFFFFF;
113 track->db_z_read_offset = 0xFFFFFFFF;
114 track->db_z_write_offset = 0xFFFFFFFF;
115 track->db_z_read_bo = NULL;
116 track->db_z_write_bo = NULL;
117 track->db_s_info = 0xFFFFFFFF;
118 track->db_s_idx = 0xFFFFFFFF;
119 track->db_s_read_offset = 0xFFFFFFFF;
120 track->db_s_write_offset = 0xFFFFFFFF;
121 track->db_s_read_bo = NULL;
122 track->db_s_write_bo = NULL;
123}
124
125static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
126{
127 /* XXX fill in */
128 return 0;
129}
130
131static int evergreen_cs_track_check(struct radeon_cs_parser *p)
132{
133 struct evergreen_cs_track *track = p->track;
134
135 /* we don't support stream out buffer yet */
136 if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
137 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
138 return -EINVAL;
139 }
140
141 /* XXX fill in */
142 return 0;
143}
144
145/**
146 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 147 * @p: parser structure holding parsing context.
 148 * @pkt: where to store packet information
149 *
150 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 151 * if the packet is bigger than the remaining ib size or if the packet type is unknown.
152 **/
153int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
154 struct radeon_cs_packet *pkt,
155 unsigned idx)
156{
157 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
158 uint32_t header;
159
160 if (idx >= ib_chunk->length_dw) {
161 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
162 idx, ib_chunk->length_dw);
163 return -EINVAL;
164 }
165 header = radeon_get_ib_value(p, idx);
166 pkt->idx = idx;
167 pkt->type = CP_PACKET_GET_TYPE(header);
168 pkt->count = CP_PACKET_GET_COUNT(header);
169 pkt->one_reg_wr = 0;
170 switch (pkt->type) {
171 case PACKET_TYPE0:
172 pkt->reg = CP_PACKET0_GET_REG(header);
173 break;
174 case PACKET_TYPE3:
175 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
176 break;
177 case PACKET_TYPE2:
178 pkt->count = -1;
179 break;
180 default:
181 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
182 return -EINVAL;
183 }
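 /* count is the payload size minus one, so the packet's last dword sits at
  * idx + count + 1; make sure it still lies inside the IB
  */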
184 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
185 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
186 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
187 return -EINVAL;
188 }
189 return 0;
190}
191
192/**
193 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 194 * @p: parser structure holding parsing context.
195 * @data: pointer to relocation data
196 * @offset_start: starting offset
197 * @offset_mask: offset mask (to align start offset on)
 198 * @cs_reloc: reloc information
199 *
 200 * Check that the next packet is a relocation packet3, do bo validation and compute
201 * GPU offset using the provided start.
202 **/
203static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
204 struct radeon_cs_reloc **cs_reloc)
205{
206 struct radeon_cs_chunk *relocs_chunk;
207 struct radeon_cs_packet p3reloc;
208 unsigned idx;
209 int r;
210
211 if (p->chunk_relocs_idx == -1) {
212 DRM_ERROR("No relocation chunk !\n");
213 return -EINVAL;
214 }
215 *cs_reloc = NULL;
216 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
217 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
218 if (r) {
219 return r;
220 }
221 p->idx += p3reloc.count + 2;
222 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
223 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
224 p3reloc.idx);
225 return -EINVAL;
226 }
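 /* the NOP payload is a single dword: an offset (in dwords) into the
  * userspace relocation chunk identifying which reloc entry to use
  */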
227 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
228 if (idx >= relocs_chunk->length_dw) {
229 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
230 idx, relocs_chunk->length_dw);
231 return -EINVAL;
232 }
233 /* FIXME: we assume reloc size is 4 dwords */
234 *cs_reloc = p->relocs_ptr[(idx / 4)];
235 return 0;
236}
237
238/**
239 * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 240 * @p: parser structure holding parsing context.
241 *
 242 * Check whether the next packet is a relocation packet3 (a NOP carrying a
 243 * reloc index). Returns 1 if it is, 0 otherwise.
244 **/
245static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
246{
247 struct radeon_cs_packet p3reloc;
248 int r;
249
250 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
251 if (r) {
252 return 0;
253 }
254 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
255 return 0;
256 }
257 return 1;
258}
259
260/**
 261 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 262 * @p: parser structure holding parsing context.
263 *
264 * Userspace sends a special sequence for VLINE waits.
265 * PACKET0 - VLINE_START_END + value
266 * PACKET3 - WAIT_REG_MEM poll vline status reg
267 * RELOC (P3) - crtc_id in reloc.
268 *
269 * This function parses this and relocates the VLINE START END
270 * and WAIT_REG_MEM packets to the correct crtc.
271 * It also detects a switched off crtc and nulls out the
272 * wait in that case.
273 */
274static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
275{
276 struct drm_mode_object *obj;
277 struct drm_crtc *crtc;
278 struct radeon_crtc *radeon_crtc;
279 struct radeon_cs_packet p3reloc, wait_reg_mem;
280 int crtc_id;
281 int r;
282 uint32_t header, h_idx, reg, wait_reg_mem_info;
283 volatile uint32_t *ib;
284
285 ib = p->ib->ptr;
286
287 /* parse the WAIT_REG_MEM */
288 r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
289 if (r)
290 return r;
291
 292 /* check it's a WAIT_REG_MEM */
293 if (wait_reg_mem.type != PACKET_TYPE3 ||
294 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
295 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
 296 return -EINVAL;
297 }
298
299 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
300 /* bit 4 is reg (0) or mem (1) */
301 if (wait_reg_mem_info & 0x10) {
302 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
 303 return -EINVAL;
304 }
305 /* waiting for value to be equal */
306 if ((wait_reg_mem_info & 0x7) != 0x3) {
307 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
 308 return -EINVAL;
309 }
310 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
311 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
 312 return -EINVAL;
313 }
314
315 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
316 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
 317 return -EINVAL;
318 }
319
320 /* jump over the NOP */
321 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
322 if (r)
323 return r;
324
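 /* the VLINE_START_END write is a one dword PACKET0, so backing up two
  * dwords from the current index gives its header
  */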
325 h_idx = p->idx - 2;
326 p->idx += wait_reg_mem.count + 2;
327 p->idx += p3reloc.count + 2;
328
329 header = radeon_get_ib_value(p, h_idx);
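 /* userspace stores the crtc_id in the NOP/reloc packet that follows the
  * WAIT_REG_MEM: skip the 2 dword PACKET0 and the 7 dword WAIT_REG_MEM,
  * then read the first NOP payload dword
  */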
330 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
331 reg = CP_PACKET0_GET_REG(header);
332 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
333 if (!obj) {
334 DRM_ERROR("cannot find crtc %d\n", crtc_id);
 335 return -EINVAL;
336 }
337 crtc = obj_to_crtc(obj);
338 radeon_crtc = to_radeon_crtc(crtc);
339 crtc_id = radeon_crtc->crtc_id;
340
341 if (!crtc->enabled) {
342 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
343 ib[h_idx + 2] = PACKET2(0);
344 ib[h_idx + 3] = PACKET2(0);
345 ib[h_idx + 4] = PACKET2(0);
346 ib[h_idx + 5] = PACKET2(0);
347 ib[h_idx + 6] = PACKET2(0);
348 ib[h_idx + 7] = PACKET2(0);
349 ib[h_idx + 8] = PACKET2(0);
350 } else {
351 switch (reg) {
352 case EVERGREEN_VLINE_START_END:
353 header &= ~R600_CP_PACKET0_REG_MASK;
354 header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
355 ib[h_idx] = header;
356 ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
357 break;
358 default:
359 DRM_ERROR("unknown crtc reloc\n");
 360 return -EINVAL;
361 }
362 }
 363 return 0;
364}
365
366static int evergreen_packet0_check(struct radeon_cs_parser *p,
367 struct radeon_cs_packet *pkt,
368 unsigned idx, unsigned reg)
369{
370 int r;
371
372 switch (reg) {
373 case EVERGREEN_VLINE_START_END:
374 r = evergreen_cs_packet_parse_vline(p);
375 if (r) {
376 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
377 idx, reg);
378 return r;
379 }
380 break;
381 default:
382 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
383 reg, idx);
384 return -EINVAL;
385 }
386 return 0;
387}
388
389static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
390 struct radeon_cs_packet *pkt)
391{
392 unsigned reg, i;
393 unsigned idx;
394 int r;
395
396 idx = pkt->idx + 1;
397 reg = pkt->reg;
398 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
399 r = evergreen_packet0_check(p, pkt, idx, reg);
400 if (r) {
401 return r;
402 }
403 }
404 return 0;
405}
406
407/**
408 * evergreen_cs_check_reg() - check if register is authorized or not
 409 * @p: parser structure holding parsing context
410 * @reg: register we are testing
411 * @idx: index into the cs buffer
412 *
413 * This function will test against evergreen_reg_safe_bm and return 0
 414 * if the register is safe. If the register is not flagged as safe, this function
 415 * will test it against a list of registers needing special handling.
416 */
417static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
418{
419 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
420 struct radeon_cs_reloc *reloc;
 421 u32 last_reg;
422 u32 m, i, tmp, *ib;
423 int r;
424
425 if (p->rdev->family >= CHIP_CAYMAN)
426 last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
427 else
428 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
429
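 /* look the register up in the per-family safe-register bitmap: bits [7:]
  * of the offset select a 32-bit word and bits [6:2] a bit within it; a
  * clear bit means the register is always safe, a set bit means it needs
  * the special handling below
  */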
430 i = (reg >> 7);
 431 if (i >= last_reg) {
432 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
433 return -EINVAL;
434 }
435 m = 1 << ((reg >> 2) & 31);
436 if (p->rdev->family >= CHIP_CAYMAN) {
437 if (!(cayman_reg_safe_bm[i] & m))
438 return 0;
439 } else {
440 if (!(evergreen_reg_safe_bm[i] & m))
441 return 0;
442 }
443 ib = p->ib->ptr;
444 switch (reg) {
 445 /* force following reg to 0 in an attempt to disable out buffer
446 * which will need us to better understand how it works to perform
447 * security check on it (Jerome)
448 */
449 case SQ_ESGS_RING_SIZE:
450 case SQ_GSVS_RING_SIZE:
451 case SQ_ESTMP_RING_SIZE:
452 case SQ_GSTMP_RING_SIZE:
453 case SQ_HSTMP_RING_SIZE:
454 case SQ_LSTMP_RING_SIZE:
455 case SQ_PSTMP_RING_SIZE:
456 case SQ_VSTMP_RING_SIZE:
457 case SQ_ESGS_RING_ITEMSIZE:
458 case SQ_ESTMP_RING_ITEMSIZE:
459 case SQ_GSTMP_RING_ITEMSIZE:
460 case SQ_GSVS_RING_ITEMSIZE:
461 case SQ_GS_VERT_ITEMSIZE:
462 case SQ_GS_VERT_ITEMSIZE_1:
463 case SQ_GS_VERT_ITEMSIZE_2:
464 case SQ_GS_VERT_ITEMSIZE_3:
465 case SQ_GSVS_RING_OFFSET_1:
466 case SQ_GSVS_RING_OFFSET_2:
467 case SQ_GSVS_RING_OFFSET_3:
468 case SQ_HSTMP_RING_ITEMSIZE:
469 case SQ_LSTMP_RING_ITEMSIZE:
470 case SQ_PSTMP_RING_ITEMSIZE:
471 case SQ_VSTMP_RING_ITEMSIZE:
472 case VGT_TF_RING_SIZE:
 473 /* get value to populate the IB, don't remove */
474 /*tmp =radeon_get_ib_value(p, idx);
475 ib[idx] = 0;*/
476 break;
477 case SQ_ESGS_RING_BASE:
478 case SQ_GSVS_RING_BASE:
479 case SQ_ESTMP_RING_BASE:
480 case SQ_GSTMP_RING_BASE:
481 case SQ_HSTMP_RING_BASE:
482 case SQ_LSTMP_RING_BASE:
483 case SQ_PSTMP_RING_BASE:
484 case SQ_VSTMP_RING_BASE:
485 r = evergreen_cs_packet_next_reloc(p, &reloc);
486 if (r) {
487 dev_warn(p->dev, "bad SET_CONTEXT_REG "
488 "0x%04X\n", reg);
489 return -EINVAL;
490 }
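 /* these ring base registers are programmed in 256-byte units, hence the
  * gpu_offset >> 8
  */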
491 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
492 break;
493 case DB_DEPTH_CONTROL:
494 track->db_depth_control = radeon_get_ib_value(p, idx);
495 break;
496 case CAYMAN_DB_EQAA:
497 if (p->rdev->family < CHIP_CAYMAN) {
498 dev_warn(p->dev, "bad SET_CONTEXT_REG "
499 "0x%04X\n", reg);
500 return -EINVAL;
501 }
502 break;
503 case CAYMAN_DB_DEPTH_INFO:
504 if (p->rdev->family < CHIP_CAYMAN) {
505 dev_warn(p->dev, "bad SET_CONTEXT_REG "
506 "0x%04X\n", reg);
507 return -EINVAL;
508 }
509 break;
510 case DB_Z_INFO:
511 r = evergreen_cs_packet_next_reloc(p, &reloc);
512 if (r) {
513 dev_warn(p->dev, "bad SET_CONTEXT_REG "
514 "0x%04X\n", reg);
515 return -EINVAL;
516 }
517 track->db_z_info = radeon_get_ib_value(p, idx);
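 /* patch the array mode to match how the depth buffer BO was actually
  * allocated: macro tiled -> 2D tiling, otherwise 1D
  */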
518 ib[idx] &= ~Z_ARRAY_MODE(0xf);
519 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
520 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
521 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
522 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
523 } else {
524 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
525 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
526 }
527 break;
528 case DB_STENCIL_INFO:
529 track->db_s_info = radeon_get_ib_value(p, idx);
530 break;
531 case DB_DEPTH_VIEW:
532 track->db_depth_view = radeon_get_ib_value(p, idx);
533 break;
534 case DB_DEPTH_SIZE:
535 track->db_depth_size = radeon_get_ib_value(p, idx);
536 track->db_depth_size_idx = idx;
537 break;
538 case DB_Z_READ_BASE:
539 r = evergreen_cs_packet_next_reloc(p, &reloc);
540 if (r) {
541 dev_warn(p->dev, "bad SET_CONTEXT_REG "
542 "0x%04X\n", reg);
543 return -EINVAL;
544 }
545 track->db_z_read_offset = radeon_get_ib_value(p, idx);
546 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
547 track->db_z_read_bo = reloc->robj;
548 break;
549 case DB_Z_WRITE_BASE:
550 r = evergreen_cs_packet_next_reloc(p, &reloc);
551 if (r) {
552 dev_warn(p->dev, "bad SET_CONTEXT_REG "
553 "0x%04X\n", reg);
554 return -EINVAL;
555 }
556 track->db_z_write_offset = radeon_get_ib_value(p, idx);
557 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
558 track->db_z_write_bo = reloc->robj;
559 break;
560 case DB_STENCIL_READ_BASE:
561 r = evergreen_cs_packet_next_reloc(p, &reloc);
562 if (r) {
563 dev_warn(p->dev, "bad SET_CONTEXT_REG "
564 "0x%04X\n", reg);
565 return -EINVAL;
566 }
567 track->db_s_read_offset = radeon_get_ib_value(p, idx);
568 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
569 track->db_s_read_bo = reloc->robj;
570 break;
571 case DB_STENCIL_WRITE_BASE:
572 r = evergreen_cs_packet_next_reloc(p, &reloc);
573 if (r) {
574 dev_warn(p->dev, "bad SET_CONTEXT_REG "
575 "0x%04X\n", reg);
576 return -EINVAL;
577 }
578 track->db_s_write_offset = radeon_get_ib_value(p, idx);
579 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
580 track->db_s_write_bo = reloc->robj;
581 break;
582 case VGT_STRMOUT_CONFIG:
583 track->vgt_strmout_config = radeon_get_ib_value(p, idx);
584 break;
585 case VGT_STRMOUT_BUFFER_CONFIG:
586 track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
587 break;
588 case CB_TARGET_MASK:
589 track->cb_target_mask = radeon_get_ib_value(p, idx);
590 break;
591 case CB_SHADER_MASK:
592 track->cb_shader_mask = radeon_get_ib_value(p, idx);
593 break;
594 case PA_SC_AA_CONFIG:
595 if (p->rdev->family >= CHIP_CAYMAN) {
596 dev_warn(p->dev, "bad SET_CONTEXT_REG "
597 "0x%04X\n", reg);
598 return -EINVAL;
599 }
600 tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
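 /* the register field holds log2 of the sample count */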
601 track->nsamples = 1 << tmp;
602 break;
603 case CAYMAN_PA_SC_AA_CONFIG:
604 if (p->rdev->family < CHIP_CAYMAN) {
605 dev_warn(p->dev, "bad SET_CONTEXT_REG "
606 "0x%04X\n", reg);
607 return -EINVAL;
608 }
609 tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
610 track->nsamples = 1 << tmp;
611 break;
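 /* CB_COLOR0-7 register blocks are spaced 0x3c bytes apart while the
  * CB_COLOR8-11 blocks use a packed 0x1c byte layout, hence the two
  * different offset-to-index formulas in the cases below
  */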
612 case CB_COLOR0_VIEW:
613 case CB_COLOR1_VIEW:
614 case CB_COLOR2_VIEW:
615 case CB_COLOR3_VIEW:
616 case CB_COLOR4_VIEW:
617 case CB_COLOR5_VIEW:
618 case CB_COLOR6_VIEW:
619 case CB_COLOR7_VIEW:
620 tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
621 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
622 break;
623 case CB_COLOR8_VIEW:
624 case CB_COLOR9_VIEW:
625 case CB_COLOR10_VIEW:
626 case CB_COLOR11_VIEW:
627 tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
628 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
629 break;
630 case CB_COLOR0_INFO:
631 case CB_COLOR1_INFO:
632 case CB_COLOR2_INFO:
633 case CB_COLOR3_INFO:
634 case CB_COLOR4_INFO:
635 case CB_COLOR5_INFO:
636 case CB_COLOR6_INFO:
637 case CB_COLOR7_INFO:
638 r = evergreen_cs_packet_next_reloc(p, &reloc);
639 if (r) {
640 dev_warn(p->dev, "bad SET_CONTEXT_REG "
641 "0x%04X\n", reg);
642 return -EINVAL;
643 }
644 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
645 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
646 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
647 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
648 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
649 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
650 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
651 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
652 }
653 break;
654 case CB_COLOR8_INFO:
655 case CB_COLOR9_INFO:
656 case CB_COLOR10_INFO:
657 case CB_COLOR11_INFO:
658 r = evergreen_cs_packet_next_reloc(p, &reloc);
659 if (r) {
660 dev_warn(p->dev, "bad SET_CONTEXT_REG "
661 "0x%04X\n", reg);
662 return -EINVAL;
663 }
664 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
665 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
666 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
667 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
668 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
669 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
670 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
671 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
672 }
673 break;
674 case CB_COLOR0_PITCH:
675 case CB_COLOR1_PITCH:
676 case CB_COLOR2_PITCH:
677 case CB_COLOR3_PITCH:
678 case CB_COLOR4_PITCH:
679 case CB_COLOR5_PITCH:
680 case CB_COLOR6_PITCH:
681 case CB_COLOR7_PITCH:
682 tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
683 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
684 track->cb_color_pitch_idx[tmp] = idx;
685 break;
686 case CB_COLOR8_PITCH:
687 case CB_COLOR9_PITCH:
688 case CB_COLOR10_PITCH:
689 case CB_COLOR11_PITCH:
690 tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
691 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
692 track->cb_color_pitch_idx[tmp] = idx;
693 break;
694 case CB_COLOR0_SLICE:
695 case CB_COLOR1_SLICE:
696 case CB_COLOR2_SLICE:
697 case CB_COLOR3_SLICE:
698 case CB_COLOR4_SLICE:
699 case CB_COLOR5_SLICE:
700 case CB_COLOR6_SLICE:
701 case CB_COLOR7_SLICE:
702 tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
703 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
704 track->cb_color_slice_idx[tmp] = idx;
705 break;
706 case CB_COLOR8_SLICE:
707 case CB_COLOR9_SLICE:
708 case CB_COLOR10_SLICE:
709 case CB_COLOR11_SLICE:
710 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
711 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
712 track->cb_color_slice_idx[tmp] = idx;
713 break;
714 case CB_COLOR0_ATTRIB:
715 case CB_COLOR1_ATTRIB:
716 case CB_COLOR2_ATTRIB:
717 case CB_COLOR3_ATTRIB:
718 case CB_COLOR4_ATTRIB:
719 case CB_COLOR5_ATTRIB:
720 case CB_COLOR6_ATTRIB:
721 case CB_COLOR7_ATTRIB:
722 case CB_COLOR8_ATTRIB:
723 case CB_COLOR9_ATTRIB:
724 case CB_COLOR10_ATTRIB:
725 case CB_COLOR11_ATTRIB:
726 break;
727 case CB_COLOR0_DIM:
728 case CB_COLOR1_DIM:
729 case CB_COLOR2_DIM:
730 case CB_COLOR3_DIM:
731 case CB_COLOR4_DIM:
732 case CB_COLOR5_DIM:
733 case CB_COLOR6_DIM:
734 case CB_COLOR7_DIM:
735 tmp = (reg - CB_COLOR0_DIM) / 0x3c;
736 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
737 track->cb_color_dim_idx[tmp] = idx;
738 break;
739 case CB_COLOR8_DIM:
740 case CB_COLOR9_DIM:
741 case CB_COLOR10_DIM:
742 case CB_COLOR11_DIM:
743 tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
744 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
745 track->cb_color_dim_idx[tmp] = idx;
746 break;
747 case CB_COLOR0_FMASK:
748 case CB_COLOR1_FMASK:
749 case CB_COLOR2_FMASK:
750 case CB_COLOR3_FMASK:
751 case CB_COLOR4_FMASK:
752 case CB_COLOR5_FMASK:
753 case CB_COLOR6_FMASK:
754 case CB_COLOR7_FMASK:
755 tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
756 r = evergreen_cs_packet_next_reloc(p, &reloc);
757 if (r) {
758 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
759 return -EINVAL;
760 }
761 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
762 track->cb_color_fmask_bo[tmp] = reloc->robj;
763 break;
764 case CB_COLOR0_CMASK:
765 case CB_COLOR1_CMASK:
766 case CB_COLOR2_CMASK:
767 case CB_COLOR3_CMASK:
768 case CB_COLOR4_CMASK:
769 case CB_COLOR5_CMASK:
770 case CB_COLOR6_CMASK:
771 case CB_COLOR7_CMASK:
772 tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
773 r = evergreen_cs_packet_next_reloc(p, &reloc);
774 if (r) {
775 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
776 return -EINVAL;
777 }
778 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
779 track->cb_color_cmask_bo[tmp] = reloc->robj;
780 break;
781 case CB_COLOR0_FMASK_SLICE:
782 case CB_COLOR1_FMASK_SLICE:
783 case CB_COLOR2_FMASK_SLICE:
784 case CB_COLOR3_FMASK_SLICE:
785 case CB_COLOR4_FMASK_SLICE:
786 case CB_COLOR5_FMASK_SLICE:
787 case CB_COLOR6_FMASK_SLICE:
788 case CB_COLOR7_FMASK_SLICE:
789 tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
790 track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
791 break;
792 case CB_COLOR0_CMASK_SLICE:
793 case CB_COLOR1_CMASK_SLICE:
794 case CB_COLOR2_CMASK_SLICE:
795 case CB_COLOR3_CMASK_SLICE:
796 case CB_COLOR4_CMASK_SLICE:
797 case CB_COLOR5_CMASK_SLICE:
798 case CB_COLOR6_CMASK_SLICE:
799 case CB_COLOR7_CMASK_SLICE:
800 tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
801 track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
802 break;
803 case CB_COLOR0_BASE:
804 case CB_COLOR1_BASE:
805 case CB_COLOR2_BASE:
806 case CB_COLOR3_BASE:
807 case CB_COLOR4_BASE:
808 case CB_COLOR5_BASE:
809 case CB_COLOR6_BASE:
810 case CB_COLOR7_BASE:
811 r = evergreen_cs_packet_next_reloc(p, &reloc);
812 if (r) {
813 dev_warn(p->dev, "bad SET_CONTEXT_REG "
814 "0x%04X\n", reg);
815 return -EINVAL;
816 }
817 tmp = (reg - CB_COLOR0_BASE) / 0x3c;
818 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
819 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
820 track->cb_color_base_last[tmp] = ib[idx];
821 track->cb_color_bo[tmp] = reloc->robj;
822 break;
823 case CB_COLOR8_BASE:
824 case CB_COLOR9_BASE:
825 case CB_COLOR10_BASE:
826 case CB_COLOR11_BASE:
827 r = evergreen_cs_packet_next_reloc(p, &reloc);
828 if (r) {
829 dev_warn(p->dev, "bad SET_CONTEXT_REG "
830 "0x%04X\n", reg);
831 return -EINVAL;
832 }
833 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
834 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
835 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
836 track->cb_color_base_last[tmp] = ib[idx];
837 track->cb_color_bo[tmp] = reloc->robj;
838 break;
839 case CB_IMMED0_BASE:
840 case CB_IMMED1_BASE:
841 case CB_IMMED2_BASE:
842 case CB_IMMED3_BASE:
843 case CB_IMMED4_BASE:
844 case CB_IMMED5_BASE:
845 case CB_IMMED6_BASE:
846 case CB_IMMED7_BASE:
847 case CB_IMMED8_BASE:
848 case CB_IMMED9_BASE:
849 case CB_IMMED10_BASE:
850 case CB_IMMED11_BASE:
851 case DB_HTILE_DATA_BASE:
852 case SQ_PGM_START_FS:
853 case SQ_PGM_START_ES:
854 case SQ_PGM_START_VS:
855 case SQ_PGM_START_GS:
856 case SQ_PGM_START_PS:
857 case SQ_PGM_START_HS:
858 case SQ_PGM_START_LS:
859 case GDS_ADDR_BASE:
860 case SQ_CONST_MEM_BASE:
861 case SQ_ALU_CONST_CACHE_GS_0:
862 case SQ_ALU_CONST_CACHE_GS_1:
863 case SQ_ALU_CONST_CACHE_GS_2:
864 case SQ_ALU_CONST_CACHE_GS_3:
865 case SQ_ALU_CONST_CACHE_GS_4:
866 case SQ_ALU_CONST_CACHE_GS_5:
867 case SQ_ALU_CONST_CACHE_GS_6:
868 case SQ_ALU_CONST_CACHE_GS_7:
869 case SQ_ALU_CONST_CACHE_GS_8:
870 case SQ_ALU_CONST_CACHE_GS_9:
871 case SQ_ALU_CONST_CACHE_GS_10:
872 case SQ_ALU_CONST_CACHE_GS_11:
873 case SQ_ALU_CONST_CACHE_GS_12:
874 case SQ_ALU_CONST_CACHE_GS_13:
875 case SQ_ALU_CONST_CACHE_GS_14:
876 case SQ_ALU_CONST_CACHE_GS_15:
877 case SQ_ALU_CONST_CACHE_PS_0:
878 case SQ_ALU_CONST_CACHE_PS_1:
879 case SQ_ALU_CONST_CACHE_PS_2:
880 case SQ_ALU_CONST_CACHE_PS_3:
881 case SQ_ALU_CONST_CACHE_PS_4:
882 case SQ_ALU_CONST_CACHE_PS_5:
883 case SQ_ALU_CONST_CACHE_PS_6:
884 case SQ_ALU_CONST_CACHE_PS_7:
885 case SQ_ALU_CONST_CACHE_PS_8:
886 case SQ_ALU_CONST_CACHE_PS_9:
887 case SQ_ALU_CONST_CACHE_PS_10:
888 case SQ_ALU_CONST_CACHE_PS_11:
889 case SQ_ALU_CONST_CACHE_PS_12:
890 case SQ_ALU_CONST_CACHE_PS_13:
891 case SQ_ALU_CONST_CACHE_PS_14:
892 case SQ_ALU_CONST_CACHE_PS_15:
893 case SQ_ALU_CONST_CACHE_VS_0:
894 case SQ_ALU_CONST_CACHE_VS_1:
895 case SQ_ALU_CONST_CACHE_VS_2:
896 case SQ_ALU_CONST_CACHE_VS_3:
897 case SQ_ALU_CONST_CACHE_VS_4:
898 case SQ_ALU_CONST_CACHE_VS_5:
899 case SQ_ALU_CONST_CACHE_VS_6:
900 case SQ_ALU_CONST_CACHE_VS_7:
901 case SQ_ALU_CONST_CACHE_VS_8:
902 case SQ_ALU_CONST_CACHE_VS_9:
903 case SQ_ALU_CONST_CACHE_VS_10:
904 case SQ_ALU_CONST_CACHE_VS_11:
905 case SQ_ALU_CONST_CACHE_VS_12:
906 case SQ_ALU_CONST_CACHE_VS_13:
907 case SQ_ALU_CONST_CACHE_VS_14:
908 case SQ_ALU_CONST_CACHE_VS_15:
909 case SQ_ALU_CONST_CACHE_HS_0:
910 case SQ_ALU_CONST_CACHE_HS_1:
911 case SQ_ALU_CONST_CACHE_HS_2:
912 case SQ_ALU_CONST_CACHE_HS_3:
913 case SQ_ALU_CONST_CACHE_HS_4:
914 case SQ_ALU_CONST_CACHE_HS_5:
915 case SQ_ALU_CONST_CACHE_HS_6:
916 case SQ_ALU_CONST_CACHE_HS_7:
917 case SQ_ALU_CONST_CACHE_HS_8:
918 case SQ_ALU_CONST_CACHE_HS_9:
919 case SQ_ALU_CONST_CACHE_HS_10:
920 case SQ_ALU_CONST_CACHE_HS_11:
921 case SQ_ALU_CONST_CACHE_HS_12:
922 case SQ_ALU_CONST_CACHE_HS_13:
923 case SQ_ALU_CONST_CACHE_HS_14:
924 case SQ_ALU_CONST_CACHE_HS_15:
925 case SQ_ALU_CONST_CACHE_LS_0:
926 case SQ_ALU_CONST_CACHE_LS_1:
927 case SQ_ALU_CONST_CACHE_LS_2:
928 case SQ_ALU_CONST_CACHE_LS_3:
929 case SQ_ALU_CONST_CACHE_LS_4:
930 case SQ_ALU_CONST_CACHE_LS_5:
931 case SQ_ALU_CONST_CACHE_LS_6:
932 case SQ_ALU_CONST_CACHE_LS_7:
933 case SQ_ALU_CONST_CACHE_LS_8:
934 case SQ_ALU_CONST_CACHE_LS_9:
935 case SQ_ALU_CONST_CACHE_LS_10:
936 case SQ_ALU_CONST_CACHE_LS_11:
937 case SQ_ALU_CONST_CACHE_LS_12:
938 case SQ_ALU_CONST_CACHE_LS_13:
939 case SQ_ALU_CONST_CACHE_LS_14:
940 case SQ_ALU_CONST_CACHE_LS_15:
941 r = evergreen_cs_packet_next_reloc(p, &reloc);
942 if (r) {
943 dev_warn(p->dev, "bad SET_CONTEXT_REG "
944 "0x%04X\n", reg);
945 return -EINVAL;
946 }
947 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
948 break;
949 default:
950 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
951 return -EINVAL;
952 }
953 return 0;
954}
955
956/**
 957 * evergreen_check_texture_resource() - check if a texture resource is valid
958 * @p: parser structure holding parsing context
959 * @idx: index into the cs buffer
960 * @texture: texture's bo structure
961 * @mipmap: mipmap's bo structure
962 *
 963 * This function will check that the resource has valid fields and that
 964 * the texture and mipmap bo objects are big enough to cover this resource.
965 */
966static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
967 struct radeon_bo *texture,
968 struct radeon_bo *mipmap)
969{
970 /* XXX fill in */
971 return 0;
972}
973
974static int evergreen_packet3_check(struct radeon_cs_parser *p,
975 struct radeon_cs_packet *pkt)
976{
977 struct radeon_cs_reloc *reloc;
978 struct evergreen_cs_track *track;
979 volatile u32 *ib;
980 unsigned idx;
981 unsigned i;
982 unsigned start_reg, end_reg, reg;
983 int r;
984 u32 idx_value;
985
986 track = (struct evergreen_cs_track *)p->track;
987 ib = p->ib->ptr;
988 idx = pkt->idx + 1;
989 idx_value = radeon_get_ib_value(p, idx);
990
991 switch (pkt->opcode) {
992 case PACKET3_SET_PREDICATION:
993 {
994 int pred_op;
995 int tmp;
996 if (pkt->count != 1) {
997 DRM_ERROR("bad SET PREDICATION\n");
998 return -EINVAL;
999 }
1000
1001 tmp = radeon_get_ib_value(p, idx + 1);
1002 pred_op = (tmp >> 16) & 0x7;
1003
1004 /* for the clear predicate operation */
1005 if (pred_op == 0)
1006 return 0;
1007
1008 if (pred_op > 2) {
1009 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1010 return -EINVAL;
1011 }
1012
1013 r = evergreen_cs_packet_next_reloc(p, &reloc);
1014 if (r) {
1015 DRM_ERROR("bad SET PREDICATION\n");
1016 return -EINVAL;
1017 }
1018
1019 ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1020 ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
1021 }
1022 break;
1023 case PACKET3_CONTEXT_CONTROL:
1024 if (pkt->count != 1) {
1025 DRM_ERROR("bad CONTEXT_CONTROL\n");
1026 return -EINVAL;
1027 }
1028 break;
1029 case PACKET3_INDEX_TYPE:
1030 case PACKET3_NUM_INSTANCES:
1031 case PACKET3_CLEAR_STATE:
1032 if (pkt->count) {
1033 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1034 return -EINVAL;
1035 }
1036 break;
1037 case CAYMAN_PACKET3_DEALLOC_STATE:
1038 if (p->rdev->family < CHIP_CAYMAN) {
1039 DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
1040 return -EINVAL;
1041 }
1042 if (pkt->count) {
1043 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1044 return -EINVAL;
1045 }
1046 break;
1047 case PACKET3_INDEX_BASE:
1048 if (pkt->count != 1) {
1049 DRM_ERROR("bad INDEX_BASE\n");
1050 return -EINVAL;
1051 }
1052 r = evergreen_cs_packet_next_reloc(p, &reloc);
1053 if (r) {
1054 DRM_ERROR("bad INDEX_BASE\n");
1055 return -EINVAL;
1056 }
1057 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1058 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1059 r = evergreen_cs_track_check(p);
1060 if (r) {
1061 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1062 return r;
1063 }
1064 break;
1065 case PACKET3_DRAW_INDEX:
1066 if (pkt->count != 3) {
1067 DRM_ERROR("bad DRAW_INDEX\n");
1068 return -EINVAL;
1069 }
1070 r = evergreen_cs_packet_next_reloc(p, &reloc);
1071 if (r) {
1072 DRM_ERROR("bad DRAW_INDEX\n");
1073 return -EINVAL;
1074 }
1075 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1076 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1077 r = evergreen_cs_track_check(p);
1078 if (r) {
1079 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1080 return r;
1081 }
1082 break;
1083 case PACKET3_DRAW_INDEX_2:
1084 if (pkt->count != 4) {
1085 DRM_ERROR("bad DRAW_INDEX_2\n");
1086 return -EINVAL;
1087 }
1088 r = evergreen_cs_packet_next_reloc(p, &reloc);
1089 if (r) {
1090 DRM_ERROR("bad DRAW_INDEX_2\n");
1091 return -EINVAL;
1092 }
1093 ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1094 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1095 r = evergreen_cs_track_check(p);
1096 if (r) {
1097 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1098 return r;
1099 }
1100 break;
1101 case PACKET3_DRAW_INDEX_AUTO:
1102 if (pkt->count != 1) {
1103 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1104 return -EINVAL;
1105 }
1106 r = evergreen_cs_track_check(p);
1107 if (r) {
1108 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1109 return r;
1110 }
1111 break;
1112 case PACKET3_DRAW_INDEX_MULTI_AUTO:
1113 if (pkt->count != 2) {
1114 DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
1115 return -EINVAL;
1116 }
1117 r = evergreen_cs_track_check(p);
1118 if (r) {
1119 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1120 return r;
1121 }
1122 break;
1123 case PACKET3_DRAW_INDEX_IMMD:
1124 if (pkt->count < 2) {
1125 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1126 return -EINVAL;
1127 }
1128 r = evergreen_cs_track_check(p);
1129 if (r) {
1130 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1131 return r;
1132 }
1133 break;
1134 case PACKET3_DRAW_INDEX_OFFSET:
1135 if (pkt->count != 2) {
1136 DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
1137 return -EINVAL;
1138 }
1139 r = evergreen_cs_track_check(p);
1140 if (r) {
1141 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1142 return r;
1143 }
1144 break;
1145 case PACKET3_DRAW_INDEX_OFFSET_2:
1146 if (pkt->count != 3) {
1147 DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
1148 return -EINVAL;
1149 }
1150 r = evergreen_cs_track_check(p);
1151 if (r) {
1152 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1153 return r;
1154 }
1155 break;
1156 case PACKET3_WAIT_REG_MEM:
1157 if (pkt->count != 5) {
1158 DRM_ERROR("bad WAIT_REG_MEM\n");
1159 return -EINVAL;
1160 }
1161 /* bit 4 is reg (0) or mem (1) */
1162 if (idx_value & 0x10) {
1163 r = evergreen_cs_packet_next_reloc(p, &reloc);
1164 if (r) {
1165 DRM_ERROR("bad WAIT_REG_MEM\n");
1166 return -EINVAL;
1167 }
1168 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1169 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1170 }
1171 break;
1172 case PACKET3_SURFACE_SYNC:
1173 if (pkt->count != 3) {
1174 DRM_ERROR("bad SURFACE_SYNC\n");
1175 return -EINVAL;
1176 }
1177 /* 0xffffffff/0x0 is flush all cache flag */
1178 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1179 radeon_get_ib_value(p, idx + 2) != 0) {
1180 r = evergreen_cs_packet_next_reloc(p, &reloc);
1181 if (r) {
1182 DRM_ERROR("bad SURFACE_SYNC\n");
1183 return -EINVAL;
1184 }
1185 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1186 }
1187 break;
1188 case PACKET3_EVENT_WRITE:
1189 if (pkt->count != 2 && pkt->count != 0) {
1190 DRM_ERROR("bad EVENT_WRITE\n");
1191 return -EINVAL;
1192 }
1193 if (pkt->count) {
1194 r = evergreen_cs_packet_next_reloc(p, &reloc);
1195 if (r) {
1196 DRM_ERROR("bad EVENT_WRITE\n");
1197 return -EINVAL;
1198 }
1199 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1200 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1201 }
1202 break;
1203 case PACKET3_EVENT_WRITE_EOP:
1204 if (pkt->count != 4) {
1205 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1206 return -EINVAL;
1207 }
1208 r = evergreen_cs_packet_next_reloc(p, &reloc);
1209 if (r) {
1210 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1211 return -EINVAL;
1212 }
1213 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1214 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1215 break;
1216 case PACKET3_EVENT_WRITE_EOS:
1217 if (pkt->count != 3) {
1218 DRM_ERROR("bad EVENT_WRITE_EOS\n");
1219 return -EINVAL;
1220 }
1221 r = evergreen_cs_packet_next_reloc(p, &reloc);
1222 if (r) {
1223 DRM_ERROR("bad EVENT_WRITE_EOS\n");
1224 return -EINVAL;
1225 }
1226 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1227 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1228 break;
1229 case PACKET3_SET_CONFIG_REG:
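 /* the first payload dword is a dword offset from the start of the
  * config register range, hence the << 2 to get bytes
  */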
1230 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
1231 end_reg = 4 * pkt->count + start_reg - 4;
1232 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
1233 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1234 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1235 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1236 return -EINVAL;
1237 }
1238 for (i = 0; i < pkt->count; i++) {
1239 reg = start_reg + (4 * i);
1240 r = evergreen_cs_check_reg(p, reg, idx+1+i);
1241 if (r)
1242 return r;
1243 }
1244 break;
1245 case PACKET3_SET_CONTEXT_REG:
1246 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
1247 end_reg = 4 * pkt->count + start_reg - 4;
1248 if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
1249 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1250 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1251 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1252 return -EINVAL;
1253 }
1254 for (i = 0; i < pkt->count; i++) {
1255 reg = start_reg + (4 * i);
1256 r = evergreen_cs_check_reg(p, reg, idx+1+i);
1257 if (r)
1258 return r;
1259 }
1260 break;
1261 case PACKET3_SET_RESOURCE:
1262 if (pkt->count % 8) {
1263 DRM_ERROR("bad SET_RESOURCE\n");
1264 return -EINVAL;
1265 }
1266 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
1267 end_reg = 4 * pkt->count + start_reg - 4;
1268 if ((start_reg < PACKET3_SET_RESOURCE_START) ||
1269 (start_reg >= PACKET3_SET_RESOURCE_END) ||
1270 (end_reg >= PACKET3_SET_RESOURCE_END)) {
1271 DRM_ERROR("bad SET_RESOURCE\n");
1272 return -EINVAL;
1273 }
1274 for (i = 0; i < (pkt->count / 8); i++) {
1275 struct radeon_bo *texture, *mipmap;
1276 u32 size, offset;
1277
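 /* each resource descriptor is 8 dwords; dword 7 holds the constant type
  * that tells textures from vertex buffers
  */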
1278 switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
1279 case SQ_TEX_VTX_VALID_TEXTURE:
1280 /* tex base */
1281 r = evergreen_cs_packet_next_reloc(p, &reloc);
1282 if (r) {
1283 DRM_ERROR("bad SET_RESOURCE (tex)\n");
1284 return -EINVAL;
1285 }
 1286 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1287 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1288 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
1289 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1290 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
1291 texture = reloc->robj;
1292 /* tex mip base */
1293 r = evergreen_cs_packet_next_reloc(p, &reloc);
1294 if (r) {
1295 DRM_ERROR("bad SET_RESOURCE (tex)\n");
1296 return -EINVAL;
1297 }
 1298 ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1299 mipmap = reloc->robj;
1300 r = evergreen_check_texture_resource(p, idx+1+(i*8),
1301 texture, mipmap);
1302 if (r)
1303 return r;
1304 break;
1305 case SQ_TEX_VTX_VALID_BUFFER:
1306 /* vtx base */
1307 r = evergreen_cs_packet_next_reloc(p, &reloc);
1308 if (r) {
1309 DRM_ERROR("bad SET_RESOURCE (vtx)\n");
1310 return -EINVAL;
1311 }
1312 offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
1313 size = radeon_get_ib_value(p, idx+1+(i*8)+1);
1314 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1315 /* force size to size of the buffer */
1316 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
1317 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
1318 }
1319 ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
1320 ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1321 break;
1322 case SQ_TEX_VTX_INVALID_TEXTURE:
1323 case SQ_TEX_VTX_INVALID_BUFFER:
1324 default:
1325 DRM_ERROR("bad SET_RESOURCE\n");
1326 return -EINVAL;
1327 }
1328 }
1329 break;
1330 case PACKET3_SET_ALU_CONST:
1331 /* XXX fix me ALU const buffers only */
1332 break;
1333 case PACKET3_SET_BOOL_CONST:
1334 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
1335 end_reg = 4 * pkt->count + start_reg - 4;
1336 if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
1337 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
1338 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
1339 DRM_ERROR("bad SET_BOOL_CONST\n");
1340 return -EINVAL;
1341 }
1342 break;
1343 case PACKET3_SET_LOOP_CONST:
1344 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
1345 end_reg = 4 * pkt->count + start_reg - 4;
1346 if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
1347 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
1348 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
1349 DRM_ERROR("bad SET_LOOP_CONST\n");
1350 return -EINVAL;
1351 }
1352 break;
1353 case PACKET3_SET_CTL_CONST:
1354 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
1355 end_reg = 4 * pkt->count + start_reg - 4;
1356 if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
1357 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
1358 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
1359 DRM_ERROR("bad SET_CTL_CONST\n");
1360 return -EINVAL;
1361 }
1362 break;
1363 case PACKET3_SET_SAMPLER:
1364 if (pkt->count % 3) {
1365 DRM_ERROR("bad SET_SAMPLER\n");
1366 return -EINVAL;
1367 }
1368 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
1369 end_reg = 4 * pkt->count + start_reg - 4;
1370 if ((start_reg < PACKET3_SET_SAMPLER_START) ||
1371 (start_reg >= PACKET3_SET_SAMPLER_END) ||
1372 (end_reg >= PACKET3_SET_SAMPLER_END)) {
1373 DRM_ERROR("bad SET_SAMPLER\n");
1374 return -EINVAL;
1375 }
1376 break;
1377 case PACKET3_NOP:
1378 break;
1379 default:
1380 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1381 return -EINVAL;
1382 }
1383 return 0;
1384}
1385
1386int evergreen_cs_parse(struct radeon_cs_parser *p)
1387{
1388 struct radeon_cs_packet pkt;
1389 struct evergreen_cs_track *track;
1390 int r;
1391
1392 if (p->track == NULL) {
1393 /* initialize tracker, we are in kms */
1394 track = kzalloc(sizeof(*track), GFP_KERNEL);
1395 if (track == NULL)
1396 return -ENOMEM;
1397 evergreen_cs_track_init(track);
1398 track->npipes = p->rdev->config.evergreen.tiling_npipes;
1399 track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
1400 track->group_size = p->rdev->config.evergreen.tiling_group_size;
1401 p->track = track;
1402 }
1403 do {
1404 r = evergreen_cs_packet_parse(p, &pkt, p->idx);
1405 if (r) {
1406 kfree(p->track);
1407 p->track = NULL;
1408 return r;
1409 }
1410 p->idx += pkt.count + 2;
1411 switch (pkt.type) {
1412 case PACKET_TYPE0:
1413 r = evergreen_cs_parse_packet0(p, &pkt);
1414 break;
1415 case PACKET_TYPE2:
1416 break;
1417 case PACKET_TYPE3:
1418 r = evergreen_packet3_check(p, &pkt);
1419 break;
1420 default:
1421 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1422 kfree(p->track);
1423 p->track = NULL;
1424 return -EINVAL;
1425 }
1426 if (r) {
1427 kfree(p->track);
1428 p->track = NULL;
1429 return r;
1430 }
1431 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1432#if 0
1433 for (r = 0; r < p->ib->length_dw; r++) {
1434 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
1435 mdelay(1);
1436 }
1437#endif
1438 kfree(p->track);
1439 p->track = NULL;
1440 return 0;
1441}
1442