/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/kernel.h>
#include "r600_reg_safe.h"
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
                                        struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
                                          struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
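/*
 * r600_cs_packet_next_reloc defaults to the _mm variant used by the KMS
 * path, where relocations are resolved through validated buffer objects.
 * r600_cs_legacy_init() at the bottom of this file repoints it at the
 * _nomm variant for the legacy (UMS) path, which reads the GPU offset
 * straight out of the relocation chunk data instead.
 */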
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
struct r600_cs_track {
        /* configuration we mirror so that we use the same code between kms/ums */
        u32                     group_size;
        u32                     nbanks;
        u32                     npipes;
        /* value we track */
        u32                     sq_config;
        u32                     nsamples;
        u32                     cb_color_base_last[8];
        struct radeon_bo        *cb_color_bo[8];
        u64                     cb_color_bo_mc[8];
        u32                     cb_color_bo_offset[8];
        struct radeon_bo        *cb_color_frag_bo[8];
        struct radeon_bo        *cb_color_tile_bo[8];
        u32                     cb_color_info[8];
        u32                     cb_color_size_idx[8];
        u32                     cb_color_size[8];
        u32                     cb_target_mask;
        u32                     cb_shader_mask;
        u32                     vgt_strmout_en;
        u32                     vgt_strmout_buffer_en;
        u32                     db_depth_control;
        u32                     db_depth_info;
        u32                     db_depth_size_idx;
        u32                     db_depth_view;
        u32                     db_depth_size;
        u32                     db_offset;
        struct radeon_bo        *db_bo;
        u64                     db_bo_mc;
};
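/*
 * The tracker mirrors the last value written to each checked register so
 * that draw-time validation (r600_cs_track_check()) can reason about the
 * complete CB/DB state, not just the packet currently being parsed.
 */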
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
        switch (format) {
        case V_038004_COLOR_8:
        case V_038004_COLOR_4_4:
        case V_038004_COLOR_3_3_2:
        case V_038004_FMT_1:
                *bpe = 1;
                break;
        case V_038004_COLOR_16:
        case V_038004_COLOR_16_FLOAT:
        case V_038004_COLOR_8_8:
        case V_038004_COLOR_5_6_5:
        case V_038004_COLOR_6_5_5:
        case V_038004_COLOR_1_5_5_5:
        case V_038004_COLOR_4_4_4_4:
        case V_038004_COLOR_5_5_5_1:
                *bpe = 2;
                break;
        case V_038004_FMT_8_8_8:
                *bpe = 3;
                break;
        case V_038004_COLOR_32:
        case V_038004_COLOR_32_FLOAT:
        case V_038004_COLOR_16_16:
        case V_038004_COLOR_16_16_FLOAT:
        case V_038004_COLOR_8_24:
        case V_038004_COLOR_8_24_FLOAT:
        case V_038004_COLOR_24_8:
        case V_038004_COLOR_24_8_FLOAT:
        case V_038004_COLOR_10_11_11:
        case V_038004_COLOR_10_11_11_FLOAT:
        case V_038004_COLOR_11_11_10:
        case V_038004_COLOR_11_11_10_FLOAT:
        case V_038004_COLOR_2_10_10_10:
        case V_038004_COLOR_8_8_8_8:
        case V_038004_COLOR_10_10_10_2:
        case V_038004_FMT_5_9_9_9_SHAREDEXP:
        case V_038004_FMT_32_AS_8:
        case V_038004_FMT_32_AS_8_8:
                *bpe = 4;
                break;
        case V_038004_COLOR_X24_8_32_FLOAT:
        case V_038004_COLOR_32_32:
        case V_038004_COLOR_32_32_FLOAT:
        case V_038004_COLOR_16_16_16_16:
        case V_038004_COLOR_16_16_16_16_FLOAT:
                *bpe = 8;
                break;
        case V_038004_FMT_16_16_16:
        case V_038004_FMT_16_16_16_FLOAT:
                *bpe = 6;
                break;
        case V_038004_FMT_32_32_32:
        case V_038004_FMT_32_32_32_FLOAT:
                *bpe = 12;
                break;
        case V_038004_COLOR_32_32_32_32:
        case V_038004_COLOR_32_32_32_32_FLOAT:
                *bpe = 16;
                break;
        case V_038004_FMT_GB_GR:
        case V_038004_FMT_BG_RG:
        case V_038004_COLOR_INVALID:
        default:
                *bpe = 16;
                return -EINVAL;
        }
        return 0;
}
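/*
 * bpe ("bytes per element") feeds every size and alignment check below:
 * CB, depth and texture bounds are all computed as pixels * bpe. A format
 * the checker cannot size is rejected up front with -EINVAL.
 */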
struct array_mode_checker {
        int array_mode;
        u32 group_size;
        u32 nbanks;
        u32 npipes;
        u32 nsamples;
        u32 bpe;
};
/* returns alignment in pixels for pitch/height/depth and bytes for base */
static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
                                                u32 *pitch_align,
                                                u32 *height_align,
                                                u32 *depth_align,
                                                u64 *base_align)
{
        u32 tile_width = 8;
        u32 tile_height = 8;
        u32 macro_tile_width = values->nbanks;
        u32 macro_tile_height = values->npipes;
        u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
        u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

        switch (values->array_mode) {
        case ARRAY_LINEAR_GENERAL:
                /* technically tile_width/_height for pitch/height */
                *pitch_align = 1; /* tile_width */
                *height_align = 1; /* tile_height */
                *depth_align = 1;
                *base_align = 1;
                break;
        case ARRAY_LINEAR_ALIGNED:
                *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
                *height_align = tile_height;
                *depth_align = 1;
                *base_align = values->group_size;
                break;
        case ARRAY_1D_TILED_THIN1:
                *pitch_align = max((u32)tile_width,
                                   (u32)(values->group_size /
                                         (tile_height * values->bpe * values->nsamples)));
                *height_align = tile_height;
                *depth_align = 1;
                *base_align = values->group_size;
                break;
        case ARRAY_2D_TILED_THIN1:
                *pitch_align = max((u32)macro_tile_width,
                                   (u32)(((values->group_size / tile_height) /
                                          (values->bpe * values->nsamples)) *
                                         values->nbanks)) * tile_width;
                *height_align = macro_tile_height * tile_height;
                *depth_align = 1;
                *base_align = max(macro_tile_bytes,
                                  (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
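/*
 * Worked example (illustrative numbers only, not a statement about any
 * particular board): with group_size = 256, nbanks = 4, npipes = 2,
 * bpe = 4 and nsamples = 1, a tile is 8x8 pixels = 256 bytes and a macro
 * tile is 4x2 tiles = 2048 bytes. For ARRAY_2D_TILED_THIN1 the code
 * above then yields:
 *   pitch_align  = max(4, ((256 / 8) / (4 * 1)) * 4) * 8 = 256 pixels
 *   height_align = 2 * 8 = 16 pixels
 *   base_align   = max(2048, 256 * 4 * 16 * 1) = 16384 bytes
 */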
static void r600_cs_track_init(struct r600_cs_track *track)
{
        int i;

        /* assume DX9 mode */
        track->sq_config = DX9_CONSTS;
        for (i = 0; i < 8; i++) {
                track->cb_color_base_last[i] = 0;
                track->cb_color_size[i] = 0;
                track->cb_color_size_idx[i] = 0;
                track->cb_color_info[i] = 0;
                track->cb_color_bo[i] = NULL;
                track->cb_color_bo_offset[i] = 0xFFFFFFFF;
                track->cb_color_bo_mc[i] = 0xFFFFFFFF;
        }
        track->cb_target_mask = 0xFFFFFFFF;
        track->cb_shader_mask = 0xFFFFFFFF;
        track->db_bo = NULL;
        track->db_bo_mc = 0xFFFFFFFF;
        /* assume the biggest format and that htile is enabled */
        track->db_depth_info = 7 | (1 << 25);
        track->db_depth_view = 0xFFFFC000;
        track->db_depth_size = 0xFFFFFFFF;
        track->db_depth_size_idx = 0;
        track->db_depth_control = 0xFFFFFFFF;
}
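/*
 * Note the init values: anything userspace is expected to program is
 * primed with its most restrictive setting (biggest depth format, htile
 * on, all targets enabled), so a command stream that never programs a
 * register fails the later checks instead of silently passing.
 */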
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
        struct r600_cs_track *track = p->track;
        u32 bpe = 0, slice_tile_max, size, tmp;
        u32 height, height_align, pitch, pitch_align, depth_align;
        u64 base_offset, base_align;
        struct array_mode_checker array_check;
        volatile u32 *ib = p->ib->ptr;
        u32 array_mode;

        if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
                dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
                return -EINVAL;
        }
        size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
        if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
                dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
                         __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
                         i, track->cb_color_info[i]);
                return -EINVAL;
        }
        /* pitch in pixels */
        pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
        slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
        slice_tile_max *= 64;
        height = slice_tile_max / pitch;
        if (height > 8192)
                height = 8192;
        array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

        base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
        array_check.array_mode = array_mode;
        array_check.group_size = track->group_size;
        array_check.nbanks = track->nbanks;
        array_check.npipes = track->npipes;
        array_check.nsamples = track->nsamples;
        array_check.bpe = bpe;
        if (r600_get_array_mode_alignment(&array_check,
                                          &pitch_align, &height_align, &depth_align, &base_align)) {
                dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
                         G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
                         track->cb_color_info[i]);
                return -EINVAL;
        }
        switch (array_mode) {
        case V_0280A0_ARRAY_LINEAR_GENERAL:
                break;
        case V_0280A0_ARRAY_LINEAR_ALIGNED:
                break;
        case V_0280A0_ARRAY_1D_TILED_THIN1:
                /* avoid breaking userspace */
                if (height > 7)
                        height &= ~0x7;
                break;
        case V_0280A0_ARRAY_2D_TILED_THIN1:
                break;
        default:
                dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
                         G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
                         track->cb_color_info[i]);
                return -EINVAL;
        }

        if (!IS_ALIGNED(pitch, pitch_align)) {
                dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
                         __func__, __LINE__, pitch, pitch_align, array_mode);
                return -EINVAL;
        }
        if (!IS_ALIGNED(height, height_align)) {
                dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
                         __func__, __LINE__, height, height_align, array_mode);
                return -EINVAL;
        }
        if (!IS_ALIGNED(base_offset, base_align)) {
                dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
                         base_offset, base_align, array_mode);
                return -EINVAL;
        }

        /* check offset */
        tmp = height * pitch * bpe;
        if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
                if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
                        /* the initial DDX does bad things with the CB size occasionally */
                        /* it rounds up height too far for slice tile max but the BO is smaller */
                        /* r600c,g also seem to flush at bad times in some apps resulting in
                         * bogus values here. So for linear just allow anything to avoid breaking
                         * broken userspace.
                         */
                } else {
                        dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
                                 array_mode,
                                 track->cb_color_bo_offset[i], tmp,
                                 radeon_bo_size(track->cb_color_bo[i]));
                        return -EINVAL;
                }
        }
        /* limit max tile */
        tmp = (height * pitch) >> 6;
        if (tmp < slice_tile_max)
                slice_tile_max = tmp;
        tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
              S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
        ib[track->cb_color_size_idx[i]] = tmp;

        return 0;
}
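/*
 * The ib[] store above is the one place where this function rewrites the
 * command stream: SLICE_TILE_MAX is clamped so the CB size programmed by
 * userspace can never address more memory than the validated BO holds.
 */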
static int r600_cs_track_check(struct radeon_cs_parser *p)
{
        struct r600_cs_track *track = p->track;
        u32 tmp;
        int r, i;
        volatile u32 *ib = p->ib->ptr;

        /* on legacy kernel we don't perform advanced check */
        if (p->rdev == NULL)
                return 0;
        /* we don't support out buffer yet */
        if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
                dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
                return -EINVAL;
        }
        /* check that we have a cb for each enabled target, we don't check
         * shader_mask because it seems mesa isn't always setting it :(
         */
        tmp = track->cb_target_mask;
        for (i = 0; i < 8; i++) {
                if ((tmp >> (i * 4)) & 0xF) {
                        /* at least one component is enabled */
                        if (track->cb_color_bo[i] == NULL) {
                                dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
                                         __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
                                return -EINVAL;
                        }
                        /* perform rewrite of CB_COLOR[0-7]_SIZE */
                        r = r600_cs_track_validate_cb(p, i);
                        if (r)
                                return r;
                }
        }
        /* Check depth buffer */
        if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
            G_028800_Z_ENABLE(track->db_depth_control)) {
                u32 nviews, bpe, ntiles, size, slice_tile_max;
                u32 height, height_align, pitch, pitch_align, depth_align;
                u64 base_offset, base_align;
                struct array_mode_checker array_check;
                int array_mode;

                if (track->db_bo == NULL) {
                        dev_warn(p->dev, "z/stencil with no depth buffer\n");
                        return -EINVAL;
                }
                if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
                        dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
                        return -EINVAL;
                }
                switch (G_028010_FORMAT(track->db_depth_info)) {
                case V_028010_DEPTH_16:
                        bpe = 2;
                        break;
                case V_028010_DEPTH_X8_24:
                case V_028010_DEPTH_8_24:
                case V_028010_DEPTH_X8_24_FLOAT:
                case V_028010_DEPTH_8_24_FLOAT:
                case V_028010_DEPTH_32_FLOAT:
                        bpe = 4;
                        break;
                case V_028010_DEPTH_X24_8_32_FLOAT:
                        bpe = 8;
                        break;
                default:
                        dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
                        return -EINVAL;
                }
                if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
                        if (!track->db_depth_size_idx) {
                                dev_warn(p->dev, "z/stencil buffer size not set\n");
                                return -EINVAL;
                        }
                        tmp = radeon_bo_size(track->db_bo) - track->db_offset;
                        tmp = (tmp / bpe) >> 6;
                        if (!tmp) {
                                dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
                                         track->db_depth_size, bpe, track->db_offset,
                                         radeon_bo_size(track->db_bo));
                                return -EINVAL;
                        }
                        ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
                } else {
                        size = radeon_bo_size(track->db_bo);
                        /* pitch in pixels */
                        pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
                        slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
                        slice_tile_max *= 64;
                        height = slice_tile_max / pitch;
                        if (height > 8192)
                                height = 8192;
                        base_offset = track->db_bo_mc + track->db_offset;
                        array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
                        array_check.array_mode = array_mode;
                        array_check.group_size = track->group_size;
                        array_check.nbanks = track->nbanks;
                        array_check.npipes = track->npipes;
                        array_check.nsamples = track->nsamples;
                        array_check.bpe = bpe;
                        if (r600_get_array_mode_alignment(&array_check,
                                                          &pitch_align, &height_align, &depth_align, &base_align)) {
                                dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
                                         G_028010_ARRAY_MODE(track->db_depth_info),
                                         track->db_depth_info);
                                return -EINVAL;
                        }
                        switch (array_mode) {
                        case V_028010_ARRAY_1D_TILED_THIN1:
                                /* don't break userspace */
                                height &= ~0x7;
                                break;
                        case V_028010_ARRAY_2D_TILED_THIN1:
                                break;
                        default:
                                dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
                                         G_028010_ARRAY_MODE(track->db_depth_info),
                                         track->db_depth_info);
                                return -EINVAL;
                        }

                        if (!IS_ALIGNED(pitch, pitch_align)) {
                                dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
                                         __func__, __LINE__, pitch, pitch_align, array_mode);
                                return -EINVAL;
                        }
                        if (!IS_ALIGNED(height, height_align)) {
                                dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
                                         __func__, __LINE__, height, height_align, array_mode);
                                return -EINVAL;
                        }
                        if (!IS_ALIGNED(base_offset, base_align)) {
                                dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
                                         base_offset, base_align, array_mode);
                                return -EINVAL;
                        }

                        ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
                        nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
                        tmp = ntiles * bpe * 64 * nviews;
                        if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
                                dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
                                         array_mode,
                                         track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
                                         radeon_bo_size(track->db_bo));
                                return -EINVAL;
                        }
                }
        }
        return 0;
}
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index into the ib at which the packet starts
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet
 * type is unknown.
 */
int r600_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
                         unsigned idx)
{
        struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
        uint32_t header;

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
        header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = CP_PACKET_GET_TYPE(header);
        pkt->count = CP_PACKET_GET_COUNT(header);
        switch (pkt->type) {
        case PACKET_TYPE0:
                pkt->reg = CP_PACKET0_GET_REG(header);
                break;
        case PACKET_TYPE3:
                pkt->opcode = CP_PACKET3_GET_OPCODE(header);
                break;
        case PACKET_TYPE2:
                pkt->count = -1;
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
                return -EINVAL;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
                return -EINVAL;
        }
        return 0;
}
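/*
 * CP packet headers, as decoded by the CP_PACKET_GET_* macros used above:
 * bits [31:30] hold the packet type and bits [29:16] the count field
 * (count + 1 data dwords follow the header, hence the "count + 2" index
 * advances throughout this file); for PACKET0/PACKET3 the low bits hold
 * the register offset and opcode respectively.
 */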
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resolved relocation
 *
 * Check that the next packet is a relocation packet3 and, if so, look the
 * relocation up in the relocation chunk and return the validated buffer
 * object it refers to.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
                                        struct radeon_cs_reloc **cs_reloc)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r600_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                return -EINVAL;
        }
        /* FIXME: we assume reloc size is 4 dwords */
        *cs_reloc = p->relocs_ptr[(idx / 4)];
        return 0;
}
/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resolved relocation
 *
 * Check that the next packet is a relocation packet3 and compute the GPU
 * offset directly from the relocation chunk data (no buffer objects on
 * the legacy path).
 */
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
                                          struct radeon_cs_reloc **cs_reloc)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r600_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                return -EINVAL;
        }
        *cs_reloc = p->relocs;
        (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
        (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
        return 0;
}
/**
 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @p:		parser structure holding parsing context.
 *
 * Return 1 if the next packet in the command stream is a relocation
 * packet3 (PACKET3_NOP), 0 otherwise. The parser index is not advanced.
 */
static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet p3reloc;
        int r;

        r = r600_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
                return 0;
        }
        if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
                return 0;
        }
        return 1;
}
/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
        struct drm_mode_object *obj;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        struct radeon_cs_packet p3reloc, wait_reg_mem;
        int crtc_id;
        int r;
        uint32_t header, h_idx, reg, wait_reg_mem_info;
        volatile uint32_t *ib;

        ib = p->ib->ptr;

        /* parse the WAIT_REG_MEM */
        r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
        if (r)
                return r;

        /* check it's a WAIT_REG_MEM */
        if (wait_reg_mem.type != PACKET_TYPE3 ||
            wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
                DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
                return -EINVAL;
        }

        wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
        /* bit 4 is reg (0) or mem (1) */
        if (wait_reg_mem_info & 0x10) {
                DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
                return -EINVAL;
        }
        /* waiting for value to be equal */
        if ((wait_reg_mem_info & 0x7) != 0x3) {
                DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
                return -EINVAL;
        }
        if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
                DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
                return -EINVAL;
        }

        if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
                DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
                return -EINVAL;
        }

        /* jump over the NOP */
        r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
        if (r)
                return r;

        h_idx = p->idx - 2;
        p->idx += wait_reg_mem.count + 2;
        p->idx += p3reloc.count + 2;

        header = radeon_get_ib_value(p, h_idx);
        crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
        reg = CP_PACKET0_GET_REG(header);

        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
                DRM_ERROR("cannot find crtc %d\n", crtc_id);
                return -EINVAL;
        }
        crtc = obj_to_crtc(obj);
        radeon_crtc = to_radeon_crtc(crtc);
        crtc_id = radeon_crtc->crtc_id;

        if (!crtc->enabled) {
                /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
                ib[h_idx + 2] = PACKET2(0);
                ib[h_idx + 3] = PACKET2(0);
                ib[h_idx + 4] = PACKET2(0);
                ib[h_idx + 5] = PACKET2(0);
                ib[h_idx + 6] = PACKET2(0);
                ib[h_idx + 7] = PACKET2(0);
                ib[h_idx + 8] = PACKET2(0);
        } else if (crtc_id == 1) {
                switch (reg) {
                case AVIVO_D1MODE_VLINE_START_END:
                        header &= ~R600_CP_PACKET0_REG_MASK;
                        header |= AVIVO_D2MODE_VLINE_START_END >> 2;
                        break;
                default:
                        DRM_ERROR("unknown crtc reloc\n");
                        return -EINVAL;
                }
                ib[h_idx] = header;
                ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
        }

        return 0;
}
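/*
 * The h_idx arithmetic above relies on the fixed layout of the userspace
 * VLINE sequence described in the comment: 2 dwords of PACKET0, then 7
 * dwords of WAIT_REG_MEM (count 5 + header + 1), then the reloc NOP,
 * which is why the crtc_id lives at h_idx + 2 + 7 + 1.
 */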
static int r600_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
        int r;

        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
                r = r600_cs_packet_parse_vline(p);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        return r;
                }
                break;
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
                       reg, idx);
                return -EINVAL;
        }
        return 0;
}
static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
                                 struct radeon_cs_packet *pkt)
{
        unsigned reg, i;
        unsigned idx;
        int r;

        idx = pkt->idx + 1;
        reg = pkt->reg;
        for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
                r = r600_packet0_check(p, pkt, idx, reg);
                if (r) {
                        return r;
                }
        }
        return 0;
}
/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @p:		parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe
 * this function will test it against a list of registers needing
 * special handling.
 */
static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
        struct r600_cs_track *track = (struct r600_cs_track *)p->track;
        struct radeon_cs_reloc *reloc;
        u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
        u32 m, i, tmp, *ib;
        int r;

        i = (reg >> 7);
        if (i >= last_reg) {
                dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
                return -EINVAL;
        }
        m = 1 << ((reg >> 2) & 31);
        if (!(r600_reg_safe_bm[i] & m))
                return 0;
        ib = p->ib->ptr;
        switch (reg) {
        /* force following reg to 0 in an attempt to disable out buffer
         * which will need us to better understand how it works to perform
         * security check on it (Jerome)
         */
        case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
        case R_008C44_SQ_ESGS_RING_SIZE:
        case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
        case R_008C54_SQ_ESTMP_RING_SIZE:
        case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
        case R_008C74_SQ_FBUF_RING_SIZE:
        case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
        case R_008C5C_SQ_GSTMP_RING_SIZE:
        case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
        case R_008C4C_SQ_GSVS_RING_SIZE:
        case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
        case R_008C6C_SQ_PSTMP_RING_SIZE:
        case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
        case R_008C7C_SQ_REDUC_RING_SIZE:
        case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
        case R_008C64_SQ_VSTMP_RING_SIZE:
        case R_0288C8_SQ_GS_VERT_ITEMSIZE:
                /* get the value to populate the IB, don't remove */
                tmp = radeon_get_ib_value(p, idx);
                ib[idx] = 0;
                break;
        case SQ_CONFIG:
                track->sq_config = radeon_get_ib_value(p, idx);
                break;
        case R_028800_DB_DEPTH_CONTROL:
                track->db_depth_control = radeon_get_ib_value(p, idx);
                break;
        case R_028010_DB_DEPTH_INFO:
                if (r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                         "0x%04X\n", reg);
                                return -EINVAL;
                        }
                        track->db_depth_info = radeon_get_ib_value(p, idx);
                        ib[idx] &= C_028010_ARRAY_MODE;
                        track->db_depth_info &= C_028010_ARRAY_MODE;
                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                                ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
                                track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
                        } else {
                                ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
                                track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
                        }
                } else
                        track->db_depth_info = radeon_get_ib_value(p, idx);
                break;
        case R_028004_DB_DEPTH_VIEW:
                track->db_depth_view = radeon_get_ib_value(p, idx);
                break;
        case R_028000_DB_DEPTH_SIZE:
                track->db_depth_size = radeon_get_ib_value(p, idx);
                track->db_depth_size_idx = idx;
                break;
        case R_028AB0_VGT_STRMOUT_EN:
                track->vgt_strmout_en = radeon_get_ib_value(p, idx);
                break;
        case R_028B20_VGT_STRMOUT_BUFFER_EN:
                track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
                break;
        case R_028238_CB_TARGET_MASK:
                track->cb_target_mask = radeon_get_ib_value(p, idx);
                break;
        case R_02823C_CB_SHADER_MASK:
                track->cb_shader_mask = radeon_get_ib_value(p, idx);
                break;
        case R_028C04_PA_SC_AA_CONFIG:
                tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
                track->nsamples = 1 << tmp;
                break;
        case R_0280A0_CB_COLOR0_INFO:
        case R_0280A4_CB_COLOR1_INFO:
        case R_0280A8_CB_COLOR2_INFO:
        case R_0280AC_CB_COLOR3_INFO:
        case R_0280B0_CB_COLOR4_INFO:
        case R_0280B4_CB_COLOR5_INFO:
        case R_0280B8_CB_COLOR6_INFO:
        case R_0280BC_CB_COLOR7_INFO:
                if (r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                                return -EINVAL;
                        }
                        tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
                        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                                ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
                                track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
                        } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
                                ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
                                track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
                        }
                } else {
                        tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
                        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
                }
                break;
        case R_028060_CB_COLOR0_SIZE:
        case R_028064_CB_COLOR1_SIZE:
        case R_028068_CB_COLOR2_SIZE:
        case R_02806C_CB_COLOR3_SIZE:
        case R_028070_CB_COLOR4_SIZE:
        case R_028074_CB_COLOR5_SIZE:
        case R_028078_CB_COLOR6_SIZE:
        case R_02807C_CB_COLOR7_SIZE:
                tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
                track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
                track->cb_color_size_idx[tmp] = idx;
                break;
                /* These registers were added late, there is userspace
                 * which does provide relocations for them but sets a
                 * 0 offset. In order to avoid breaking old userspace
                 * we detect this and set the address to point to the
                 * last CB_COLOR0_BASE; note that if userspace doesn't
                 * set CB_COLOR0_BASE before this register we will
                 * report an error. Old userspace always sets
                 * CB_COLOR0_BASE before any of these.
                 */
        case R_0280E0_CB_COLOR0_FRAG:
        case R_0280E4_CB_COLOR1_FRAG:
        case R_0280E8_CB_COLOR2_FRAG:
        case R_0280EC_CB_COLOR3_FRAG:
        case R_0280F0_CB_COLOR4_FRAG:
        case R_0280F4_CB_COLOR5_FRAG:
        case R_0280F8_CB_COLOR6_FRAG:
        case R_0280FC_CB_COLOR7_FRAG:
                tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
                if (!r600_cs_packet_next_is_pkt3_nop(p)) {
                        if (!track->cb_color_base_last[tmp]) {
                                dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
                                return -EINVAL;
                        }
                        ib[idx] = track->cb_color_base_last[tmp];
                        track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
                } else {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                                return -EINVAL;
                        }
                        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                        track->cb_color_frag_bo[tmp] = reloc->robj;
                }
                break;
        case R_0280C0_CB_COLOR0_TILE:
        case R_0280C4_CB_COLOR1_TILE:
        case R_0280C8_CB_COLOR2_TILE:
        case R_0280CC_CB_COLOR3_TILE:
        case R_0280D0_CB_COLOR4_TILE:
        case R_0280D4_CB_COLOR5_TILE:
        case R_0280D8_CB_COLOR6_TILE:
        case R_0280DC_CB_COLOR7_TILE:
                tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
                if (!r600_cs_packet_next_is_pkt3_nop(p)) {
                        if (!track->cb_color_base_last[tmp]) {
                                dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
                                return -EINVAL;
                        }
                        ib[idx] = track->cb_color_base_last[tmp];
                        track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
                } else {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                                return -EINVAL;
                        }
                        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                        track->cb_color_tile_bo[tmp] = reloc->robj;
                }
                break;
        case CB_COLOR0_BASE:
        case CB_COLOR1_BASE:
        case CB_COLOR2_BASE:
        case CB_COLOR3_BASE:
        case CB_COLOR4_BASE:
        case CB_COLOR5_BASE:
        case CB_COLOR6_BASE:
        case CB_COLOR7_BASE:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                tmp = (reg - CB_COLOR0_BASE) / 4;
                track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                track->cb_color_base_last[tmp] = ib[idx];
                track->cb_color_bo[tmp] = reloc->robj;
                track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
                break;
        case DB_DEPTH_BASE:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                track->db_offset = radeon_get_ib_value(p, idx) << 8;
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                track->db_bo = reloc->robj;
                track->db_bo_mc = reloc->lobj.gpu_offset;
                break;
        case DB_HTILE_DATA_BASE:
        case SQ_PGM_START_FS:
        case SQ_PGM_START_ES:
        case SQ_PGM_START_VS:
        case SQ_PGM_START_GS:
        case SQ_PGM_START_PS:
        case SQ_ALU_CONST_CACHE_GS_0:
        case SQ_ALU_CONST_CACHE_GS_1:
        case SQ_ALU_CONST_CACHE_GS_2:
        case SQ_ALU_CONST_CACHE_GS_3:
        case SQ_ALU_CONST_CACHE_GS_4:
        case SQ_ALU_CONST_CACHE_GS_5:
        case SQ_ALU_CONST_CACHE_GS_6:
        case SQ_ALU_CONST_CACHE_GS_7:
        case SQ_ALU_CONST_CACHE_GS_8:
        case SQ_ALU_CONST_CACHE_GS_9:
        case SQ_ALU_CONST_CACHE_GS_10:
        case SQ_ALU_CONST_CACHE_GS_11:
        case SQ_ALU_CONST_CACHE_GS_12:
        case SQ_ALU_CONST_CACHE_GS_13:
        case SQ_ALU_CONST_CACHE_GS_14:
        case SQ_ALU_CONST_CACHE_GS_15:
        case SQ_ALU_CONST_CACHE_PS_0:
        case SQ_ALU_CONST_CACHE_PS_1:
        case SQ_ALU_CONST_CACHE_PS_2:
        case SQ_ALU_CONST_CACHE_PS_3:
        case SQ_ALU_CONST_CACHE_PS_4:
        case SQ_ALU_CONST_CACHE_PS_5:
        case SQ_ALU_CONST_CACHE_PS_6:
        case SQ_ALU_CONST_CACHE_PS_7:
        case SQ_ALU_CONST_CACHE_PS_8:
        case SQ_ALU_CONST_CACHE_PS_9:
        case SQ_ALU_CONST_CACHE_PS_10:
        case SQ_ALU_CONST_CACHE_PS_11:
        case SQ_ALU_CONST_CACHE_PS_12:
        case SQ_ALU_CONST_CACHE_PS_13:
        case SQ_ALU_CONST_CACHE_PS_14:
        case SQ_ALU_CONST_CACHE_PS_15:
        case SQ_ALU_CONST_CACHE_VS_0:
        case SQ_ALU_CONST_CACHE_VS_1:
        case SQ_ALU_CONST_CACHE_VS_2:
        case SQ_ALU_CONST_CACHE_VS_3:
        case SQ_ALU_CONST_CACHE_VS_4:
        case SQ_ALU_CONST_CACHE_VS_5:
        case SQ_ALU_CONST_CACHE_VS_6:
        case SQ_ALU_CONST_CACHE_VS_7:
        case SQ_ALU_CONST_CACHE_VS_8:
        case SQ_ALU_CONST_CACHE_VS_9:
        case SQ_ALU_CONST_CACHE_VS_10:
        case SQ_ALU_CONST_CACHE_VS_11:
        case SQ_ALU_CONST_CACHE_VS_12:
        case SQ_ALU_CONST_CACHE_VS_13:
        case SQ_ALU_CONST_CACHE_VS_14:
        case SQ_ALU_CONST_CACHE_VS_15:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                break;
        default:
                dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
                return -EINVAL;
        }
        return 0;
}
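/*
 * All of the *_BASE cases above share one relocation pattern: the
 * register holds a GPU address in 256-byte units, so the checker adds
 * (gpu_offset >> 8) of the validated BO to the value userspace supplied.
 */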
static inline unsigned minify(unsigned size, unsigned levels)
{
        size = size >> levels;
        if (size < 1)
                size = 1;
        return size;
}

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
                              unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
                              unsigned pitch_align,
                              unsigned *l0_size, unsigned *mipmap_size)
{
        unsigned offset, i, level, face;
        unsigned width, height, depth, rowstride, size;

        w0 = minify(w0, 0);
        h0 = minify(h0, 0);
        d0 = minify(d0, 0);
        for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
                width = minify(w0, i);
                height = minify(h0, i);
                depth = minify(d0, i);
                for(face = 0; face < nfaces; face++) {
                        rowstride = ALIGN((width * bpe), pitch_align);
                        size = height * rowstride * depth;
                        offset += size;
                        offset = (offset + 0x1f) & ~0x1f;
                }
        }
        *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
        *mipmap_size = offset;
        if (!nlevels)
                *mipmap_size = *l0_size;
        if (!blevel)
                *mipmap_size -= *l0_size;
}
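/*
 * Sketch of the walk above, assuming a hypothetical call with w0 = h0 = 8,
 * d0 = 1, nfaces = 1, blevel = 0, nlevels = 4, bpe = 4 and pitch_align = 32:
 * every level's rowstride aligns up to 32 bytes, so the loop accumulates
 * 256 + 128 + 64 + 32 bytes into offset, l0_size comes out as 256, and
 * with blevel == 0 the reported mipmap_size is the remaining 224 bytes.
 */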
/**
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
                                              struct radeon_bo *texture,
                                              struct radeon_bo *mipmap,
                                              u64 base_offset,
                                              u64 mip_offset,
                                              u32 tiling_flags)
{
        struct r600_cs_track *track = p->track;
        u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
        u32 word0, word1, l0_size, mipmap_size;
        u32 height_align, pitch, pitch_align, depth_align;
        u64 base_align;
        struct array_mode_checker array_check;

        /* on legacy kernel we don't perform advanced check */
        if (p->rdev == NULL)
                return 0;

        /* convert to bytes */
        base_offset <<= 8;
        mip_offset <<= 8;

        word0 = radeon_get_ib_value(p, idx + 0);
        if (tiling_flags & RADEON_TILING_MACRO)
                word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
        else if (tiling_flags & RADEON_TILING_MICRO)
                word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
        word1 = radeon_get_ib_value(p, idx + 1);
        w0 = G_038000_TEX_WIDTH(word0) + 1;
        h0 = G_038004_TEX_HEIGHT(word1) + 1;
        d0 = G_038004_TEX_DEPTH(word1);
        nfaces = 1;
        switch (G_038000_DIM(word0)) {
        case V_038000_SQ_TEX_DIM_1D:
        case V_038000_SQ_TEX_DIM_2D:
        case V_038000_SQ_TEX_DIM_3D:
                break;
        case V_038000_SQ_TEX_DIM_CUBEMAP:
                nfaces = 6;
                break;
        case V_038000_SQ_TEX_DIM_1D_ARRAY:
        case V_038000_SQ_TEX_DIM_2D_ARRAY:
        case V_038000_SQ_TEX_DIM_2D_MSAA:
        case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
        default:
                dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
                return -EINVAL;
        }
        if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
                dev_warn(p->dev, "%s:%d texture invalid format %d\n",
                         __func__, __LINE__, G_038004_DATA_FORMAT(word1));
                return -EINVAL;
        }

        /* pitch in texels */
        pitch = (G_038000_PITCH(word0) + 1) * 8;
        array_check.array_mode = G_038000_TILE_MODE(word0);
        array_check.group_size = track->group_size;
        array_check.nbanks = track->nbanks;
        array_check.npipes = track->npipes;
        array_check.nsamples = 1;
        array_check.bpe = bpe;
        if (r600_get_array_mode_alignment(&array_check,
                                          &pitch_align, &height_align, &depth_align, &base_align)) {
                dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
                         __func__, __LINE__, G_038000_TILE_MODE(word0));
                return -EINVAL;
        }

        /* XXX check height as well... */

        if (!IS_ALIGNED(pitch, pitch_align)) {
                dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
                         __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
                return -EINVAL;
        }
        if (!IS_ALIGNED(base_offset, base_align)) {
                dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
                         __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
                return -EINVAL;
        }
        if (!IS_ALIGNED(mip_offset, base_align)) {
                dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
                         __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
                return -EINVAL;
        }

        word0 = radeon_get_ib_value(p, idx + 4);
        word1 = radeon_get_ib_value(p, idx + 5);
        blevel = G_038010_BASE_LEVEL(word0);
        nlevels = G_038014_LAST_LEVEL(word1);
        r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
                          (pitch_align * bpe),
                          &l0_size, &mipmap_size);
        /* using get ib will give us the offset into the texture bo */
        word0 = radeon_get_ib_value(p, idx + 2) << 8;
        if ((l0_size + word0) > radeon_bo_size(texture)) {
                dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
                         w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
                return -EINVAL;
        }
        /* using get ib will give us the offset into the mipmap bo */
        word0 = radeon_get_ib_value(p, idx + 3) << 8;
        if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
                /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
                  w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
        }
        return 0;
}
static int r600_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
        struct radeon_cs_reloc *reloc;
        struct r600_cs_track *track;
        volatile u32 *ib;
        unsigned idx;
        unsigned i;
        unsigned start_reg, end_reg, reg;
        int r;
        u32 idx_value;

        track = (struct r600_cs_track *)p->track;
        ib = p->ib->ptr;
        idx = pkt->idx + 1;
        idx_value = radeon_get_ib_value(p, idx);

        switch (pkt->opcode) {
        case PACKET3_START_3D_CMDBUF:
                if (p->family >= CHIP_RV770 || pkt->count) {
                        DRM_ERROR("bad START_3D\n");
                        return -EINVAL;
                }
                break;
        case PACKET3_CONTEXT_CONTROL:
                if (pkt->count != 1) {
                        DRM_ERROR("bad CONTEXT_CONTROL\n");
                        return -EINVAL;
                }
                break;
        case PACKET3_INDEX_TYPE:
        case PACKET3_NUM_INSTANCES:
                if (pkt->count) {
                        DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
                        return -EINVAL;
                }
                break;
        case PACKET3_DRAW_INDEX:
                if (pkt->count != 3) {
                        DRM_ERROR("bad DRAW_INDEX\n");
                        return -EINVAL;
                }
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("bad DRAW_INDEX\n");
                        return -EINVAL;
                }
                ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
                ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                r = r600_cs_track_check(p);
                if (r) {
                        dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
                        return r;
                }
                break;
        case PACKET3_DRAW_INDEX_AUTO:
                if (pkt->count != 1) {
                        DRM_ERROR("bad DRAW_INDEX_AUTO\n");
                        return -EINVAL;
                }
                r = r600_cs_track_check(p);
                if (r) {
                        dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
                        return r;
                }
                break;
        case PACKET3_DRAW_INDEX_IMMD_BE:
        case PACKET3_DRAW_INDEX_IMMD:
                if (pkt->count < 2) {
                        DRM_ERROR("bad DRAW_INDEX_IMMD\n");
                        return -EINVAL;
                }
                r = r600_cs_track_check(p);
                if (r) {
                        dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
                        return r;
                }
                break;
        case PACKET3_WAIT_REG_MEM:
                if (pkt->count != 5) {
                        DRM_ERROR("bad WAIT_REG_MEM\n");
                        return -EINVAL;
                }
                /* bit 4 is reg (0) or mem (1) */
                if (idx_value & 0x10) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("bad WAIT_REG_MEM\n");
                                return -EINVAL;
                        }
                        ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
                        ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                }
                break;
        case PACKET3_SURFACE_SYNC:
                if (pkt->count != 3) {
                        DRM_ERROR("bad SURFACE_SYNC\n");
                        return -EINVAL;
                }
                /* 0xffffffff/0x0 is flush all cache flag */
                if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
                    radeon_get_ib_value(p, idx + 2) != 0) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("bad SURFACE_SYNC\n");
                                return -EINVAL;
                        }
                        ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                }
                break;
        case PACKET3_EVENT_WRITE:
                if (pkt->count != 2 && pkt->count != 0) {
                        DRM_ERROR("bad EVENT_WRITE\n");
                        return -EINVAL;
                }
                if (pkt->count) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("bad EVENT_WRITE\n");
                                return -EINVAL;
                        }
                        ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
                        ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                }
                break;
        case PACKET3_EVENT_WRITE_EOP:
                if (pkt->count != 4) {
                        DRM_ERROR("bad EVENT_WRITE_EOP\n");
                        return -EINVAL;
                }
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("bad EVENT_WRITE\n");
                        return -EINVAL;
                }
                ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
                ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                break;
        case PACKET3_SET_CONFIG_REG:
                start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
                    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
                    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
                        DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
                        return -EINVAL;
                }
                for (i = 0; i < pkt->count; i++) {
                        reg = start_reg + (4 * i);
                        r = r600_cs_check_reg(p, reg, idx+1+i);
                        if (r)
                                return r;
                }
                break;
        case PACKET3_SET_CONTEXT_REG:
                start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
                    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
                    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
                        DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
                        return -EINVAL;
                }
                for (i = 0; i < pkt->count; i++) {
                        reg = start_reg + (4 * i);
                        r = r600_cs_check_reg(p, reg, idx+1+i);
                        if (r)
                                return r;
                }
                break;
        case PACKET3_SET_RESOURCE:
                if (pkt->count % 7) {
                        DRM_ERROR("bad SET_RESOURCE\n");
                        return -EINVAL;
                }
                start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
                    (start_reg >= PACKET3_SET_RESOURCE_END) ||
                    (end_reg >= PACKET3_SET_RESOURCE_END)) {
                        DRM_ERROR("bad SET_RESOURCE\n");
                        return -EINVAL;
                }
                for (i = 0; i < (pkt->count / 7); i++) {
                        struct radeon_bo *texture, *mipmap;
                        u32 size, offset, base_offset, mip_offset;

                        switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
                        case SQ_TEX_VTX_VALID_TEXTURE:
                                /* tex base */
                                r = r600_cs_packet_next_reloc(p, &reloc);
                                if (r) {
                                        DRM_ERROR("bad SET_RESOURCE\n");
                                        return -EINVAL;
                                }
                                base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                                        ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
                                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                                        ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
                                texture = reloc->robj;
                                /* tex mip base */
                                r = r600_cs_packet_next_reloc(p, &reloc);
                                if (r) {
                                        DRM_ERROR("bad SET_RESOURCE\n");
                                        return -EINVAL;
                                }
                                mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                                mipmap = reloc->robj;
                                r = r600_check_texture_resource(p, idx+(i*7)+1,
                                                                texture, mipmap,
                                                                base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
                                                                mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
                                                                reloc->lobj.tiling_flags);
                                if (r)
                                        return r;
                                ib[idx+1+(i*7)+2] += base_offset;
                                ib[idx+1+(i*7)+3] += mip_offset;
                                break;
                        case SQ_TEX_VTX_VALID_BUFFER:
                                /* vtx base */
                                r = r600_cs_packet_next_reloc(p, &reloc);
                                if (r) {
                                        DRM_ERROR("bad SET_RESOURCE\n");
                                        return -EINVAL;
                                }
                                offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
                                size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
                                if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
                                        /* force size to size of the buffer */
                                        dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
                                                 size + offset, radeon_bo_size(reloc->robj));
                                        ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
                                }
                                ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
                                ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                                break;
                        case SQ_TEX_VTX_INVALID_TEXTURE:
                        case SQ_TEX_VTX_INVALID_BUFFER:
                        default:
                                DRM_ERROR("bad SET_RESOURCE\n");
                                return -EINVAL;
                        }
                }
                break;
        case PACKET3_SET_ALU_CONST:
                if (track->sq_config & DX9_CONSTS) {
                        start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
                        end_reg = 4 * pkt->count + start_reg - 4;
                        if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
                            (start_reg >= PACKET3_SET_ALU_CONST_END) ||
                            (end_reg >= PACKET3_SET_ALU_CONST_END)) {
                                DRM_ERROR("bad SET_ALU_CONST\n");
                                return -EINVAL;
                        }
                }
                break;
        case PACKET3_SET_BOOL_CONST:
                start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
                    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
                    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
                        DRM_ERROR("bad SET_BOOL_CONST\n");
                        return -EINVAL;
                }
                break;
        case PACKET3_SET_LOOP_CONST:
                start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
                    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
                    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
                        DRM_ERROR("bad SET_LOOP_CONST\n");
                        return -EINVAL;
                }
                break;
        case PACKET3_SET_CTL_CONST:
                start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
                    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
                    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
                        DRM_ERROR("bad SET_CTL_CONST\n");
                        return -EINVAL;
                }
                break;
        case PACKET3_SET_SAMPLER:
                if (pkt->count % 3) {
                        DRM_ERROR("bad SET_SAMPLER\n");
                        return -EINVAL;
                }
                start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
                    (start_reg >= PACKET3_SET_SAMPLER_END) ||
                    (end_reg >= PACKET3_SET_SAMPLER_END)) {
                        DRM_ERROR("bad SET_SAMPLER\n");
                        return -EINVAL;
                }
                break;
        case PACKET3_SURFACE_BASE_UPDATE:
                if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
                        DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
                        return -EINVAL;
                }
                if (pkt->count) {
                        DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
                        return -EINVAL;
                }
                break;
        case PACKET3_NOP:
                break;
        default:
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
                return -EINVAL;
        }
        return 0;
}
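/*
 * Throughout the switch above, pkt->count is the CP count field, i.e. the
 * number of data dwords following the header minus one; a check such as
 * "pkt->count != 3" for DRAW_INDEX therefore pins the packet to exactly
 * four data dwords.
 */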
int r600_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        struct r600_cs_track *track;
        int r;

        if (p->track == NULL) {
                /* initialize tracker, we are in kms */
                track = kzalloc(sizeof(*track), GFP_KERNEL);
                if (track == NULL)
                        return -ENOMEM;
                r600_cs_track_init(track);
                if (p->rdev->family < CHIP_RV770) {
                        track->npipes = p->rdev->config.r600.tiling_npipes;
                        track->nbanks = p->rdev->config.r600.tiling_nbanks;
                        track->group_size = p->rdev->config.r600.tiling_group_size;
                } else if (p->rdev->family <= CHIP_RV740) {
                        track->npipes = p->rdev->config.rv770.tiling_npipes;
                        track->nbanks = p->rdev->config.rv770.tiling_nbanks;
                        track->group_size = p->rdev->config.rv770.tiling_group_size;
                }
                p->track = track;
        }
        do {
                r = r600_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
                        kfree(p->track);
                        p->track = NULL;
                        return r;
                }
                p->idx += pkt.count + 2;
                switch (pkt.type) {
                case PACKET_TYPE0:
                        r = r600_cs_parse_packet0(p, &pkt);
                        break;
                case PACKET_TYPE2:
                        break;
                case PACKET_TYPE3:
                        r = r600_packet3_check(p, &pkt);
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        kfree(p->track);
                        p->track = NULL;
                        return -EINVAL;
                }
                if (r) {
                        kfree(p->track);
                        p->track = NULL;
                        return r;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
        /* debug dump of the parsed IB, normally compiled out */
        for (r = 0; r < p->ib->length_dw; r++) {
                printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
        }
#endif
        kfree(p->track);
        p->track = NULL;
        return 0;
}
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
        if (p->chunk_relocs_idx == -1) {
                return 0;
        }
        p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
        return 0;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set then unvalidate the buffer, otherwise just free the
 * memory used by the parsing context.
 */
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
        unsigned i;

        kfree(parser->relocs);
        for (i = 0; i < parser->nchunks; i++) {
                kfree(parser->chunks[i].kdata);
                kfree(parser->chunks[i].kpage[0]);
                kfree(parser->chunks[i].kpage[1]);
        }
        kfree(parser->chunks);
        kfree(parser->chunks_array);
}
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
                   unsigned family, u32 *ib, int *l)
{
        struct radeon_cs_parser parser;
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_ib fake_ib;
        struct r600_cs_track *track;
        int r;

        /* initialize tracker */
        track = kzalloc(sizeof(*track), GFP_KERNEL);
        if (track == NULL)
                return -ENOMEM;
        r600_cs_track_init(track);
        r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.dev = &dev->pdev->dev;
        parser.rdev = NULL;
        parser.family = family;
        parser.ib = &fake_ib;
        parser.track = track;
        fake_ib.ptr = ib;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                r600_cs_parser_fini(&parser, r);
                return r;
        }
        r = r600_cs_parser_relocs_legacy(&parser);
        if (r) {
                DRM_ERROR("Failed to parse relocation !\n");
                r600_cs_parser_fini(&parser, r);
                return r;
        }
        /* Copy the packet into the IB, the parser will read from the
         * input memory (cached) and write to the IB (which can be
         * uncached). */
        ib_chunk = &parser.chunks[parser.chunk_ib_idx];
        parser.ib->length_dw = ib_chunk->length_dw;
        *l = parser.ib->length_dw;
        r = r600_cs_parse(&parser);
        if (r) {
                DRM_ERROR("Invalid command stream !\n");
                r600_cs_parser_fini(&parser, r);
                return r;
        }
        r = radeon_cs_finish_pages(&parser);
        if (r) {
                DRM_ERROR("Invalid command stream !\n");
                r600_cs_parser_fini(&parser, r);
                return r;
        }
        r600_cs_parser_fini(&parser, r);
        return r;
}
void r600_cs_legacy_init(void)
{
        r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}