/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/kernel.h>
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes,
					   u32 *nbanks, u32 *group_size);
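
/*
 * Relocation handling is dispatched through the next_reloc_t pointer above so
 * the same parsing code serves both paths: the KMS path (_mm) resolves the
 * relocation NOP packet against already-validated buffer objects, while the
 * legacy UMS path (_nomm, installed by r600_cs_legacy_init() at the bottom of
 * this file) reads the GPU offset straight out of the relocation chunk.
 */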
struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			sq_config;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u32			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u32			cb_color_info[8];
	u32			cb_color_size[8];
	u32			cb_color_size_idx[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			vgt_strmout_en;
	u32			vgt_strmout_buffer_en;
	u32			db_depth_control;
	u32			db_depth_info;
	u32			db_depth_size;
	u32			db_depth_size_idx;
	u32			db_depth_view;
	u32			db_offset;
	struct radeon_bo	*db_bo;
};
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
	switch (format) {
	case V_038004_COLOR_8:
	case V_038004_COLOR_4_4:
	case V_038004_COLOR_3_3_2:
		*bpe = 1;
		break;
	case V_038004_COLOR_16:
	case V_038004_COLOR_16_FLOAT:
	case V_038004_COLOR_8_8:
	case V_038004_COLOR_5_6_5:
	case V_038004_COLOR_6_5_5:
	case V_038004_COLOR_1_5_5_5:
	case V_038004_COLOR_4_4_4_4:
	case V_038004_COLOR_5_5_5_1:
		*bpe = 2;
		break;
	case V_038004_FMT_8_8_8:
		*bpe = 3;
		break;
	case V_038004_COLOR_32:
	case V_038004_COLOR_32_FLOAT:
	case V_038004_COLOR_16_16:
	case V_038004_COLOR_16_16_FLOAT:
	case V_038004_COLOR_8_24:
	case V_038004_COLOR_8_24_FLOAT:
	case V_038004_COLOR_24_8:
	case V_038004_COLOR_24_8_FLOAT:
	case V_038004_COLOR_10_11_11:
	case V_038004_COLOR_10_11_11_FLOAT:
	case V_038004_COLOR_11_11_10:
	case V_038004_COLOR_11_11_10_FLOAT:
	case V_038004_COLOR_2_10_10_10:
	case V_038004_COLOR_8_8_8_8:
	case V_038004_COLOR_10_10_10_2:
	case V_038004_FMT_5_9_9_9_SHAREDEXP:
	case V_038004_FMT_32_AS_8:
	case V_038004_FMT_32_AS_8_8:
		*bpe = 4;
		break;
	case V_038004_COLOR_X24_8_32_FLOAT:
	case V_038004_COLOR_32_32:
	case V_038004_COLOR_32_32_FLOAT:
	case V_038004_COLOR_16_16_16_16:
	case V_038004_COLOR_16_16_16_16_FLOAT:
		*bpe = 8;
		break;
	case V_038004_FMT_16_16_16:
	case V_038004_FMT_16_16_16_FLOAT:
		*bpe = 6;
		break;
	case V_038004_FMT_32_32_32:
	case V_038004_FMT_32_32_32_FLOAT:
		*bpe = 12;
		break;
	case V_038004_COLOR_32_32_32_32:
	case V_038004_COLOR_32_32_32_32_FLOAT:
		*bpe = 16;
		break;
	case V_038004_FMT_GB_GR:
	case V_038004_FMT_BG_RG:
	case V_038004_COLOR_INVALID:
	default:
		*bpe = 16;
		return -EINVAL;
	}
	return 0;
}
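
/*
 * Typical use of r600_bpe_from_format() — a minimal sketch of the pattern the
 * validators below follow:
 *
 *	u32 bpe = 0;
 *	if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i])))
 *		return -EINVAL;	(unknown or unsupported format)
 *
 * after which bpe holds the bytes-per-element used for pitch/size math.
 */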
static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->db_bo = NULL;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
}
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
	volatile u32 *ib = p->ib->ptr;

	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
		dev_warn(p->dev, "FMASK or CMASK buffers are not supported by this kernel\n");
		return -EINVAL;
	}
	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch is the number of 8x8 tiles per row */
	pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	height = size / (pitch * 8 * bpe);
	switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		/* technically height & 0x7 */
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, 8)) {
			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
				 __func__, __LINE__, height);
			return -EINVAL;
		}
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, 8)) {
			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
				 __func__, __LINE__, height);
			return -EINVAL;
		}
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		pitch_align = max((u32)track->nbanks,
				  (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		if (!IS_ALIGNED((height / 8), track->nbanks)) {
			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
				 __func__, __LINE__, height);
			return -EINVAL;
		}
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	tmp = height * pitch * 8 * bpe;
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
		return -EINVAL;
	}
	if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
		dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
		return -EINVAL;
	}
	tmp = (height * pitch * 8) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
	      S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;
	return 0;
}
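
/*
 * Note that rather than rejecting an oversized CB_COLOR[i]_SIZE outright,
 * the validator above clamps SLICE_TILE_MAX against the real BO size and
 * rewrites the size dword in the IB, so the colour buffer can never address
 * memory past the end of the bound buffer object.
 */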
static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;
	volatile u32 *ib = p->ib->ptr;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;
	/* we don't support output buffers yet */
	if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}
	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	tmp = track->cb_target_mask;
	for (i = 0; i < 8; i++) {
		if ((tmp >> (i * 4)) & 0xF) {
			/* at least one component is enabled */
			if (track->cb_color_bo[i] == NULL) {
				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
					 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
				return -EINVAL;
			}
			/* perform rewrite of CB_COLOR[0-7]_SIZE */
			r = r600_cs_track_validate_cb(p, i);
			if (r)
				return r;
		}
	}
	/* Check depth buffer */
	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	    G_028800_Z_ENABLE(track->db_depth_control)) {
		u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;

		if (track->db_bo == NULL) {
			dev_warn(p->dev, "z/stencil with no depth buffer\n");
			return -EINVAL;
		}
		if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
			dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
			return -EINVAL;
		}
		switch (G_028010_FORMAT(track->db_depth_info)) {
		case V_028010_DEPTH_16:
			bpe = 2;
			break;
		case V_028010_DEPTH_X8_24:
		case V_028010_DEPTH_8_24:
		case V_028010_DEPTH_X8_24_FLOAT:
		case V_028010_DEPTH_8_24_FLOAT:
		case V_028010_DEPTH_32_FLOAT:
			bpe = 4;
			break;
		case V_028010_DEPTH_X24_8_32_FLOAT:
			bpe = 8;
			break;
		default:
			dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			if (!track->db_depth_size_idx) {
				dev_warn(p->dev, "z/stencil buffer size not set\n");
				return -EINVAL;
			}
			tmp = radeon_bo_size(track->db_bo) - track->db_offset;
			tmp = (tmp / bpe) >> 6;
			if (!tmp) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
					 track->db_depth_size, bpe, track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
		} else {
			size = radeon_bo_size(track->db_bo);
			pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
			height = size / (pitch * 8 * bpe);
			switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
			case V_028010_ARRAY_1D_TILED_THIN1:
				pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
				if (!IS_ALIGNED(pitch, pitch_align)) {
					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
						 __func__, __LINE__, pitch);
					return -EINVAL;
				}
				if (!IS_ALIGNED(height, 8)) {
					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
						 __func__, __LINE__, height);
					return -EINVAL;
				}
				break;
			case V_028010_ARRAY_2D_TILED_THIN1:
				pitch_align = max((u32)track->nbanks,
						  (u32)(((track->group_size / 8) / bpe) * track->nbanks));
				if (!IS_ALIGNED(pitch, pitch_align)) {
					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
						 __func__, __LINE__, pitch);
					return -EINVAL;
				}
				if ((height / 8) & (track->nbanks - 1)) {
					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
						 __func__, __LINE__, height);
					return -EINVAL;
				}
				break;
			default:
				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					 G_028010_ARRAY_MODE(track->db_depth_info),
					 track->db_depth_info);
				return -EINVAL;
			}
			if (!IS_ALIGNED(track->db_offset, track->group_size)) {
				dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
				return -EINVAL;
			}
			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
			tmp = ntiles * bpe * 64 * nviews;
			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
		}
	}
	return 0;
}
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index in the ib at which the packet starts
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
 */
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
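
/*
 * For reference, the CP_PACKET_GET_* macros used above assume the standard
 * Radeon CP header encoding: bits [31:30] carry the packet type, bits
 * [29:16] the body dword count minus one, and for PACKET0 bits [15:0] hold
 * the starting register index (register byte offset >> 2).
 */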
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting relocation information
 *
 * Check that the next packet is a relocation packet3, do the bo validation
 * and compute the GPU offset using the provided start.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting relocation information
 *
 * Check that the next packet is a relocation packet3 and compute the GPU
 * offset directly from the relocation chunk (legacy, non-memory-managed path).
 */
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs;
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}
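
/*
 * The two helpers above differ only in how a relocation entry is resolved:
 * the _mm (KMS) variant looks up an already-validated BO through relocs_ptr,
 * while the _nomm (legacy) variant reassembles the 64-bit GPU address from
 * dwords 0 and 3 of the raw relocation record in the chunk.
 */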
/**
 * r600_cs_packet_next_is_pkt3_nop() - test if the next packet is a packet3 nop for reloc
 * @p:		parser structure holding parsing context.
 *
 * Check whether the next packet is a relocation packet3, without
 * advancing the parser past it.
 */
static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return 0;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP)
		return 0;
	return 1;
}
/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this sequence and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nops out the wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}
	return 0;
}
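
/*
 * Note on the crtc_id == 1 branch above: userspace always emits the VLINE
 * sequence against the D1 (first crtc) register addresses, so when the reloc
 * resolves to the second crtc the parser patches both the PACKET0 register
 * (D1MODE -> D2MODE VLINE_START_END) and the WAIT_REG_MEM poll address.
 */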
static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r)
			return r;
	}
	return 0;
}
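
/*
 * A note on the r600_reg_safe_bm[] test used below: the generated table
 * packs one bit per register dword. Registers are 4 bytes apart and each
 * u32 entry covers 32 of them, so reg >> 7 selects the table entry and
 * (reg >> 2) & 31 selects the bit. A clear bit means the write is safe
 * as-is; a set bit sends the register to the switch for special handling
 * (or rejection).
 */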
/**
 * r600_cs_check_reg() - check if a register is authorized or not
 * @p:		parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe,
 * this function will test it against a list of registers needing
 * special handling.
 */
static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
	u32 m, i, tmp, *ib;
	int r;

	i = (reg >> 7);
	if (i > last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib->ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the output
	 * buffer, which will need us to better understand how it works to
	 * perform security checks on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB, don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case R_028010_DB_DEPTH_INFO:
		if (r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else
			track->db_depth_info = radeon_get_ib_value(p, idx);
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->nsamples = 1 << tmp;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		break;
		/* These registers were added late; there is userspace which
		 * does provide a relocation for them but sets a 0 offset.
		 * In order to avoid breaking old userspace we detect this
		 * and set the address to point to the last CB_COLOR0_BASE.
		 * Note that if userspace doesn't set CB_COLOR0_BASE before
		 * this register we will report an error. Old userspace
		 * always sets CB_COLOR0_BASE before any of these.
		 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_frag_bo[tmp] = reloc->robj;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_tile_bo[tmp] = reloc->robj;
		}
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case DB_DEPTH_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		break;
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}
static inline unsigned minify(unsigned size, unsigned levels)
{
	size = size >> levels;
	if (size < 1)
		size = 1;
	return size;
}
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
			      unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
			      unsigned pitch_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level, face;
	unsigned width, height, depth, rowstride, size;

	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = minify(w0, i);
		height = minify(h0, i);
		depth = minify(d0, i);
		for (face = 0; face < nfaces; face++) {
			rowstride = ALIGN((width * bpe), pitch_align);
			size = height * rowstride * depth;
			offset += size;
			offset = (offset + 0x1f) & ~0x1f;
		}
	}
	*l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
	*mipmap_size = offset;
	if (!nlevels)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}
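
/*
 * Worked example (input values assumed purely for illustration): a 2D
 * texture with w0 = 256, h0 = 256, d0 = 1, bpe = 4 and a pitch_align of
 * 1024 bytes gives
 *
 *	l0_size = ALIGN(256 * 4, 1024) * 256 * 1 = 1024 * 256 = 262144 bytes
 *
 * and each following mip level halves width/height via minify() before the
 * same rowstride alignment and 32-byte per-level padding are applied.
 */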
/**
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					      struct radeon_bo *texture,
					      struct radeon_bo *mipmap,
					      u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
	u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;
	word0 = radeon_get_ib_value(p, idx + 0);
	if (tiling_flags & RADEON_TILING_MACRO)
		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
	else if (tiling_flags & RADEON_TILING_MICRO)
		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	word1 = radeon_get_ib_value(p, idx + 1);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	nfaces = 1;
	switch (G_038000_DIM(word0)) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_MSAA:
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
		return -EINVAL;
	}

	pitch = G_038000_PITCH(word0) + 1;
	switch (G_038000_TILE_MODE(word0)) {
	case V_038000_ARRAY_LINEAR_GENERAL:
		pitch_align = 1;
		/* XXX check height align */
		break;
	case V_038000_ARRAY_LINEAR_ALIGNED:
		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		/* XXX check height align */
		break;
	case V_038000_ARRAY_1D_TILED_THIN1:
		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		/* XXX check height align */
		break;
	case V_038000_ARRAY_2D_TILED_THIN1:
		pitch_align = max((u32)track->nbanks,
				  (u32)(((track->group_size / 8) / bpe) * track->nbanks));
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		/* XXX check height align */
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
			 G_038000_TILE_MODE(word0), word0);
		return -EINVAL;
	}
	/* XXX check offset align */

	word0 = radeon_get_ib_value(p, idx + 4);
	word1 = radeon_get_ib_value(p, idx + 5);
	blevel = G_038010_BASE_LEVEL(word0);
	nlevels = G_038014_LAST_LEVEL(word1);
	r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
			  (pitch_align * bpe),
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	word0 = radeon_get_ib_value(p, idx + 2) << 8;
	if ((l0_size + word0) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
			 w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	word0 = radeon_get_ib_value(p, idx + 3) << 8;
	if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
		dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
			 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(mipmap));
		return -EINVAL;
	}
	return 0;
}
static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap, reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}
/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the
 * memory used by the parsing context.
 */
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	parser.track = track;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB; the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}