/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/kernel.h>
#include "r600_reg_safe.h"
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
	u64			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	u64			cb_color_frag_offset[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u64			cb_color_tile_offset[8];
	u32			cb_color_size_idx[8]; /* unused */
	u32			cb_shader_mask;  /* unused */
	u32			vgt_strmout_buffer_en;
	struct radeon_bo	*vgt_strmout_bo[4];
	u64			vgt_strmout_bo_mc[4]; /* unused */
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_size_idx;
	struct radeon_bo	*db_bo;
	bool			sx_misc_kill_all_prims;
	struct radeon_bo	*htile_bo;
#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

	unsigned blockheight;
	unsigned valid_color;
	enum radeon_family min_family;
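/*
 * Each color_formats_table entry below encodes { blockwidth, blockheight,
 * blocksize in bytes, valid_color, min_family }, matching the FMT_*
 * initializer macros above.  A zero blockwidth marks a slot that is not a
 * valid texture format, and min_family gates the Evergreen-only formats.
 */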
static const struct gpu_formats color_formats_table[] = {
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
	FMT_24_BIT(V_038004_FMT_8_8_8),
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
bool r600_fmt_is_valid_color(u32 format)
	if (format >= ARRAY_SIZE(color_formats_table))
	if (color_formats_table[format].valid_color)

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
	if (format >= ARRAY_SIZE(color_formats_table))
	if (family < color_formats_table[format].min_family)
	if (color_formats_table[format].blockwidth > 0)

int r600_fmt_get_blocksize(u32 format)
	if (format >= ARRAY_SIZE(color_formats_table))
	return color_formats_table[format].blocksize;

int r600_fmt_get_nblocksx(u32 format, u32 w)
	if (format >= ARRAY_SIZE(color_formats_table))
	bw = color_formats_table[format].blockwidth;
	return (w + bw - 1) / bw;

int r600_fmt_get_nblocksy(u32 format, u32 h)
	if (format >= ARRAY_SIZE(color_formats_table))
	bh = color_formats_table[format].blockheight;
	return (h + bh - 1) / bh;
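/*
 * The helpers above round a width/height in texels up to whole compressed
 * blocks.  For example, with BC1 from the table above (4x4 blocks of 8
 * bytes), a 10x10 mip level needs (10 + 4 - 1) / 4 = 3 blocks in each
 * direction, i.e. 3 * 3 * 8 = 72 bytes per sample.
 */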
struct array_mode_checker {

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*base_align = values->group_size;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*base_align = values->group_size;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				   (u32)((values->group_size * values->nbanks) /
					 (values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
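/*
 * tile_width/tile_height above are the micro-tile dimensions (8x8 texels on
 * this hardware generation, judging by the 8-pixel granularity used for the
 * PITCH_TILE_MAX style register fields elsewhere in this file), and a 2D
 * macro tile is nbanks x npipes micro tiles; that is why the macro-tile byte
 * size feeds the base alignment for ARRAY_2D_TILED_THIN1.
 */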
static void r600_cs_track_init(struct r600_cs_track *track)
	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
		track->cb_color_frag_bo[i] = NULL;
		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
		track->cb_color_tile_bo[i] = NULL;
		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
		track->cb_color_mask[i] = 0xFFFFFFFF;
	track->is_resolve = false;
	track->nsamples = 16;
	track->log_nsamples = 4;
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;
	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
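/*
 * The defaults above appear deliberately pessimistic (16 samples, the
 * largest depth format, htile assumed enabled, 0xFFFFFFFF masks and
 * offsets) so that any state the command stream never programs is still
 * validated against a worst-case footprint rather than silently trusted.
 */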
static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;

	/* When resolve is used, the second colorbuffer has always 1 sample. */
	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;

	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, format,
			 i, track->cb_color_info[i]);
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
			 base_offset, base_align, array_mode);

	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
	      r600_fmt_get_blocksize(format) * nsamples;
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally */
			/* it rounds up height too far for slice tile max but the BO is smaller */
			/* r600c,g also seem to flush at bad times in some apps resulting in
			 * bogus values here. So for linear just allow anything to avoid breaking */
			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
				 r600_fmt_get_nblocksy(format, height),
				 r600_fmt_get_blocksize(format));

	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
	      S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;

	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
	case V_0280A0_TILE_DISABLE:
	case V_0280A0_FRAG_ENABLE:
		if (track->nsamples > 1) {
			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
			/* the tile size is 8x8, but the size is in units of bits.
			 * for bytes, do just * 8. */
			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);

			if (bytes + track->cb_color_frag_offset[i] >
			    radeon_bo_size(track->cb_color_frag_bo[i])) {
				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
					 __func__, tile_max, bytes,
					 track->cb_color_frag_offset[i],
					 radeon_bo_size(track->cb_color_frag_bo[i]));
	case V_0280A0_CLEAR_ENABLE:
		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
		/* One block = 128x128 pixels, one 8x8 tile has 4 bits..
		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
		uint32_t bytes = (block_max + 1) * 128;

		if (bytes + track->cb_color_tile_offset[i] >
		    radeon_bo_size(track->cb_color_tile_bo[i])) {
			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
				 __func__, block_max, bytes,
				 track->cb_color_tile_offset[i],
				 radeon_bo_size(track->cb_color_tile_bo[i]));
		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
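/*
 * The depth/stencil validation below mirrors the colorbuffer path above:
 * decode pitch/height from DB_DEPTH_SIZE, compute the alignment
 * requirements for the programmed array mode, and make sure the depth BO
 * (and the htile BO when TILE_SURFACE_ENABLE is set) is large enough.
 */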
static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
	struct r600_cs_track *track = p->track;
	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
	u32 height_align, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;

	if (track->db_bo == NULL) {
		dev_warn(p->dev, "z/stencil with no depth buffer\n");
	switch (G_028010_FORMAT(track->db_depth_info)) {
	case V_028010_DEPTH_16:
	case V_028010_DEPTH_X8_24:
	case V_028010_DEPTH_8_24:
	case V_028010_DEPTH_X8_24_FLOAT:
	case V_028010_DEPTH_8_24_FLOAT:
	case V_028010_DEPTH_32_FLOAT:
	case V_028010_DEPTH_X24_8_32_FLOAT:
		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
		if (!track->db_depth_size_idx) {
			dev_warn(p->dev, "z/stencil buffer size not set\n");
		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
		tmp = (tmp / bpe) >> 6;
			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
				 track->db_depth_size, bpe, track->db_offset,
				 radeon_bo_size(track->db_bo));
		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
		size = radeon_bo_size(track->db_bo);
		/* pitch in pixels */
		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		slice_tile_max *= 64;
		height = slice_tile_max / pitch;
		base_offset = track->db_bo_mc + track->db_offset;
		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
		array_check.array_mode = array_mode;
		array_check.group_size = track->group_size;
		array_check.nbanks = track->nbanks;
		array_check.npipes = track->npipes;
		array_check.nsamples = track->nsamples;
		array_check.blocksize = bpe;
		if (r600_get_array_mode_alignment(&array_check,
						  &pitch_align, &height_align, &depth_align, &base_align)) {
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				 G_028010_ARRAY_MODE(track->db_depth_info),
				 track->db_depth_info);
		switch (array_mode) {
		case V_028010_ARRAY_1D_TILED_THIN1:
			/* don't break userspace */
		case V_028010_ARRAY_2D_TILED_THIN1:
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				 G_028010_ARRAY_MODE(track->db_depth_info),
				 track->db_depth_info);
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
				 __func__, __LINE__, pitch, pitch_align, array_mode);
		if (!IS_ALIGNED(height, height_align)) {
			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
				 __func__, __LINE__, height, height_align, array_mode);
		if (!IS_ALIGNED(base_offset, base_align)) {
			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
				 base_offset, base_align, array_mode);
		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
				 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
				 radeon_bo_size(track->db_bo));
	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
		if (track->htile_bo == NULL) {
			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				 __func__, __LINE__, track->db_depth_info);
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
				 __func__, __LINE__, track->db_depth_size);
		if (G_028D24_LINEAR(track->htile_surface)) {
			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
			nbx = round_up(nbx, 16 * 8);
			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
			nby = round_up(nby, track->npipes * 8);
			/* always assume 8x8 htile */
			/* align is htile align * 8; the htile align varies according to
			 * the number of pipes, the tile width and nby */
			switch (track->npipes) {
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 64 * 8);
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 32 * 8);
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 32 * 8);
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 16 * 8);
				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
					 __func__, __LINE__, track->npipes);
		/* compute number of htile */
		/* size must be aligned on npipes * 2K boundary */
		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
		size += track->htile_offset;
		if (size > radeon_bo_size(track->htile_bo)) {
			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
	track->db_dirty = false;
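/*
 * r600_cs_track_check() below is the gate run before every draw packet
 * (see the PACKET3_DRAW_INDEX* cases further down): it only re-validates
 * the pieces of state whose dirty flag is set, which keeps the per-draw
 * cost low for command streams that rarely reprogram CB/DB/streamout
 * registers.
 */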
static int r600_cs_track_check(struct radeon_cs_parser *p)
	struct r600_cs_track *track = p->track;

	/* on legacy kernel we don't perform advanced check */
	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						     (u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  radeon_bo_size(track->vgt_strmout_bo[i]));
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
		track->streamout_dirty = false;

	if (track->sx_misc_kill_all_prims)

	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :( */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;

		/* We must check both colorbuffers for RESOLVE. */
		if (track->is_resolve) {
		for (i = 0; i < 8; i++) {
			if ((tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
				/* perform rewrite of CB_COLOR[0-7]_SIZE */
				r = r600_cs_track_validate_cb(p, i);
		track->cb_dirty = false;

	/* Check depth buffer */
	if (track->db_dirty &&
	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	     G_028800_Z_ENABLE(track->db_depth_control))) {
		r = r600_cs_track_validate_db(p);
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet is unknown.
 */
static int r600_cs_packet_parse(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
	header = radeon_get_ib_value(p, idx);
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
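/*
 * A PM4 packet header conventionally packs the packet type in the top two
 * bits, the body length (count) in bits 29:16, and, for type-3 packets, the
 * opcode in bits 15:8; the exact bit positions are hidden behind the
 * CP_PACKET_GET_TYPE/COUNT and CP_PACKET3_GET_OPCODE macros used above, so
 * this parser never open-codes them.
 */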
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 */
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
	*cs_reloc = p->relocs;
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
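/*
 * The _mm variant above resolves a relocation through p->relocs_ptr, i.e.
 * through buffer objects already validated by the generic radeon CS code,
 * while the _nomm (legacy/UMS) variant reads the raw reloc chunk and builds
 * the 64-bit GPU offset itself from two dwords of kdata.  Which one is used
 * is selected through the r600_cs_packet_next_reloc function pointer
 * declared near the top of this file.
 */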
/**
 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @parser: parser structure holding parsing context.
 *
 * Check next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
 */
static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
	struct radeon_cs_packet p3reloc;

	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
/**
 * r600_cs_packet_next_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);

	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			DRM_ERROR("unknown crtc reloc\n");
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
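/*
 * Type-0 packets (raw register writes) are almost entirely rejected at this
 * point: the only register r600_packet0_check() accepts is the VLINE wait
 * sequence handled by r600_cs_packet_parse_vline(); anything else hits the
 * "Forbidden register" path above.
 */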
/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @parser: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special handling.
 */
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;

	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))

	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome) */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB don't remove */
		tmp = radeon_get_ib_value(p, idx);
		track->sq_config = radeon_get_ib_value(p, idx);
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
	case R_028010_DB_DEPTH_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			track->db_depth_info = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		track->db_dirty = true;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->streamout_dirty = true;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->log_nsamples = tmp;
		track->nsamples = 1 << tmp;
		track->cb_dirty = true;
	case R_028808_CB_COLOR_CONTROL:
		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
		track->cb_dirty = true;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
	case R_028080_CB_COLOR0_VIEW:
	case R_028084_CB_COLOR1_VIEW:
	case R_028088_CB_COLOR2_VIEW:
	case R_02808C_CB_COLOR3_VIEW:
	case R_028090_CB_COLOR4_VIEW:
	case R_028094_CB_COLOR5_VIEW:
	case R_028098_CB_COLOR6_VIEW:
	case R_02809C_CB_COLOR7_VIEW:
		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		track->cb_dirty = true;
		/* These registers were added late; there is userspace
		 * which does provide relocations for them but sets a
		 * 0 offset. In order to avoid breaking old userspace
		 * we detect this and set the address to point to the last
		 * CB_COLOR0_BASE. Note that if userspace doesn't set
		 * CB_COLOR0_BASE before this register we will report an
		 * error. Old userspace always sets CB_COLOR0_BASE
		 * before any of this.
		 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
			r = r600_cs_packet_next_reloc(p, &reloc);
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			track->cb_color_frag_bo[tmp] = reloc->robj;
			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
			r = r600_cs_packet_next_reloc(p, &reloc);
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			track->cb_color_tile_bo[tmp] = reloc->robj;
			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
	case R_028100_CB_COLOR0_MASK:
	case R_028104_CB_COLOR1_MASK:
	case R_028108_CB_COLOR2_MASK:
	case R_02810C_CB_COLOR3_MASK:
	case R_028110_CB_COLOR4_MASK:
	case R_028114_CB_COLOR5_MASK:
	case R_028118_CB_COLOR6_MASK:
	case R_02811C_CB_COLOR7_MASK:
		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->cb_dirty = true;
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->lobj.gpu_offset;
		track->db_dirty = true;
	case DB_HTILE_DATA_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
	case DB_HTILE_SURFACE:
		track->htile_surface = radeon_get_ib_value(p, idx);
		/* force 8x8 htile width and height */
		track->db_dirty = true;
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
	case SX_MEMORY_EXPORT_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONFIG_REG "
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
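/*
 * r600_reg_safe_bm is a generated bitmap (see r600_reg_safe.h) with one bit
 * per register dword: a clear bit means the register may be written freely,
 * while a set bit sends the register into the switch above for special
 * handling or rejection.  r600_is_safe_reg() further down performs the same
 * lookup without the special-case handling.
 */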
unsigned r600_mip_minify(unsigned size, unsigned level)
	val = max(1U, size >> level);
	val = roundup_pow_of_two(val);

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned nlevels = llevel - blevel + 1;

	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);
		nbx = round_up(nbx, block_align);
		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = round_up(nby, height_align);
		depth = r600_mip_minify(d0, i);
		size = nbx * nby * blocksize * nsamples;
		if (i == 0 || i == 1)
			offset = round_up(offset, base_align);
	*mipmap_size = offset;
		*mipmap_size = *l0_size;
	*mipmap_size -= *l0_size;

/**
 * r600_check_texture_resource() - check that a texture resource is valid
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
				       struct radeon_bo *texture,
				       struct radeon_bo *mipmap,
	struct r600_cs_track *track = p->track;
	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
	u32 height_align, pitch, pitch_align, depth_align;
	struct array_mode_checker array_check;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)

	/* convert to bytes */
	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	word1 = radeon_get_ib_value(p, idx + 1);
	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	word4 = radeon_get_ib_value(p, idx + 4);
	word5 = radeon_get_ib_value(p, idx + 5);
	dim = G_038000_DIM(word0);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	format = G_038004_DATA_FORMAT(word1);
	blevel = G_038010_BASE_LEVEL(word4);
	llevel = G_038014_LAST_LEVEL(word5);
	/* pitch in texels */
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		if (p->family >= CHIP_RV770)
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
	case V_038000_SQ_TEX_DIM_2D_MSAA:
		array_check.nsamples = 1 << llevel;
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));

	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
	barray = G_038014_BASE_ARRAY(word5);
	larray = G_038014_LAST_ARRAY(word5);
	nfaces = larray - barray + 1;

	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
	/* using get ib will give us the offset into the mipmap bo */
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
			   w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
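/*
 * r600_texture_size() reports the base level size and the size of the rest
 * of the mip chain separately (mipmap_size has l0_size subtracted at the
 * end) because the base level and the mips can live in two different BOs
 * here.  Note that the "mipmap bo too small" warning above is deliberately
 * left commented out.
 */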
static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1750 static int r600_packet3_check(struct radeon_cs_parser
*p
,
1751 struct radeon_cs_packet
*pkt
)
1753 struct radeon_cs_reloc
*reloc
;
1754 struct r600_cs_track
*track
;
1758 unsigned start_reg
, end_reg
, reg
;
1762 track
= (struct r600_cs_track
*)p
->track
;
1765 idx_value
= radeon_get_ib_value(p
, idx
);
1767 switch (pkt
->opcode
) {
1768 case PACKET3_SET_PREDICATION
:
1774 if (pkt
->count
!= 1) {
1775 DRM_ERROR("bad SET PREDICATION\n");
1779 tmp
= radeon_get_ib_value(p
, idx
+ 1);
1780 pred_op
= (tmp
>> 16) & 0x7;
1782 /* for the clear predicate operation */
1787 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op
);
1791 r
= r600_cs_packet_next_reloc(p
, &reloc
);
1793 DRM_ERROR("bad SET PREDICATION\n");
1797 offset
= reloc
->lobj
.gpu_offset
+
1798 (idx_value
& 0xfffffff0) +
1799 ((u64
)(tmp
& 0xff) << 32);
1801 ib
[idx
+ 0] = offset
;
1802 ib
[idx
+ 1] = (tmp
& 0xffffff00) | (upper_32_bits(offset
) & 0xff);
1806 case PACKET3_START_3D_CMDBUF
:
1807 if (p
->family
>= CHIP_RV770
|| pkt
->count
) {
1808 DRM_ERROR("bad START_3D\n");
1812 case PACKET3_CONTEXT_CONTROL
:
1813 if (pkt
->count
!= 1) {
1814 DRM_ERROR("bad CONTEXT_CONTROL\n");
1818 case PACKET3_INDEX_TYPE
:
1819 case PACKET3_NUM_INSTANCES
:
1821 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1825 case PACKET3_DRAW_INDEX
:
1828 if (pkt
->count
!= 3) {
1829 DRM_ERROR("bad DRAW_INDEX\n");
1832 r
= r600_cs_packet_next_reloc(p
, &reloc
);
1834 DRM_ERROR("bad DRAW_INDEX\n");
1838 offset
= reloc
->lobj
.gpu_offset
+
1840 ((u64
)(radeon_get_ib_value(p
, idx
+1) & 0xff) << 32);
1843 ib
[idx
+1] = upper_32_bits(offset
) & 0xff;
1845 r
= r600_cs_track_check(p
);
1847 dev_warn(p
->dev
, "%s:%d invalid cmd stream\n", __func__
, __LINE__
);
1852 case PACKET3_DRAW_INDEX_AUTO
:
1853 if (pkt
->count
!= 1) {
1854 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1857 r
= r600_cs_track_check(p
);
1859 dev_warn(p
->dev
, "%s:%d invalid cmd stream %d\n", __func__
, __LINE__
, idx
);
1863 case PACKET3_DRAW_INDEX_IMMD_BE
:
1864 case PACKET3_DRAW_INDEX_IMMD
:
1865 if (pkt
->count
< 2) {
1866 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1869 r
= r600_cs_track_check(p
);
1871 dev_warn(p
->dev
, "%s:%d invalid cmd stream\n", __func__
, __LINE__
);
1875 case PACKET3_WAIT_REG_MEM
:
1876 if (pkt
->count
!= 5) {
1877 DRM_ERROR("bad WAIT_REG_MEM\n");
1880 /* bit 4 is reg (0) or mem (1) */
1881 if (idx_value
& 0x10) {
1884 r
= r600_cs_packet_next_reloc(p
, &reloc
);
1886 DRM_ERROR("bad WAIT_REG_MEM\n");
1890 offset
= reloc
->lobj
.gpu_offset
+
1891 (radeon_get_ib_value(p
, idx
+1) & 0xfffffff0) +
1892 ((u64
)(radeon_get_ib_value(p
, idx
+2) & 0xff) << 32);
1894 ib
[idx
+1] = (ib
[idx
+1] & 0x3) | (offset
& 0xfffffff0);
1895 ib
[idx
+2] = upper_32_bits(offset
) & 0xff;
1898 case PACKET3_CP_DMA
:
1902 if (pkt
->count
!= 4) {
1903 DRM_ERROR("bad CP DMA\n");
1906 command
= radeon_get_ib_value(p
, idx
+4);
1907 size
= command
& 0x1fffff;
1908 if (command
& PACKET3_CP_DMA_CMD_SAS
) {
1909 /* src address space is register */
1910 DRM_ERROR("CP DMA SAS not supported\n");
1913 if (command
& PACKET3_CP_DMA_CMD_SAIC
) {
1914 DRM_ERROR("CP DMA SAIC only supported for registers\n");
1917 /* src address space is memory */
1918 r
= r600_cs_packet_next_reloc(p
, &reloc
);
1920 DRM_ERROR("bad CP DMA SRC\n");
1924 tmp
= radeon_get_ib_value(p
, idx
) +
1925 ((u64
)(radeon_get_ib_value(p
, idx
+1) & 0xff) << 32);
1927 offset
= reloc
->lobj
.gpu_offset
+ tmp
;
1929 if ((tmp
+ size
) > radeon_bo_size(reloc
->robj
)) {
1930 dev_warn(p
->dev
, "CP DMA src buffer too small (%llu %lu)\n",
1931 tmp
+ size
, radeon_bo_size(reloc
->robj
));
1936 ib
[idx
+1] = (ib
[idx
+1] & 0xffffff00) | (upper_32_bits(offset
) & 0xff);
1938 if (command
& PACKET3_CP_DMA_CMD_DAS
) {
1939 /* dst address space is register */
1940 DRM_ERROR("CP DMA DAS not supported\n");
1943 /* dst address space is memory */
1944 if (command
& PACKET3_CP_DMA_CMD_DAIC
) {
1945 DRM_ERROR("CP DMA DAIC only supported for registers\n");
1948 r
= r600_cs_packet_next_reloc(p
, &reloc
);
1950 DRM_ERROR("bad CP DMA DST\n");
1954 tmp
= radeon_get_ib_value(p
, idx
+2) +
1955 ((u64
)(radeon_get_ib_value(p
, idx
+3) & 0xff) << 32);
1957 offset
= reloc
->lobj
.gpu_offset
+ tmp
;
1959 if ((tmp
+ size
) > radeon_bo_size(reloc
->robj
)) {
1960 dev_warn(p
->dev
, "CP DMA dst buffer too small (%llu %lu)\n",
1961 tmp
+ size
, radeon_bo_size(reloc
->robj
));
1966 ib
[idx
+3] = upper_32_bits(offset
) & 0xff;
1970 case PACKET3_SURFACE_SYNC
:
1971 if (pkt
->count
!= 3) {
1972 DRM_ERROR("bad SURFACE_SYNC\n");
1975 /* 0xffffffff/0x0 is flush all cache flag */
1976 if (radeon_get_ib_value(p
, idx
+ 1) != 0xffffffff ||
1977 radeon_get_ib_value(p
, idx
+ 2) != 0) {
1978 r
= r600_cs_packet_next_reloc(p
, &reloc
);
1980 DRM_ERROR("bad SURFACE_SYNC\n");
1983 ib
[idx
+2] += (u32
)((reloc
->lobj
.gpu_offset
>> 8) & 0xffffffff);
1986 case PACKET3_EVENT_WRITE
:
1987 if (pkt
->count
!= 2 && pkt
->count
!= 0) {
1988 DRM_ERROR("bad EVENT_WRITE\n");
1994 r
= r600_cs_packet_next_reloc(p
, &reloc
);
1996 DRM_ERROR("bad EVENT_WRITE\n");
1999 offset
= reloc
->lobj
.gpu_offset
+
2000 (radeon_get_ib_value(p
, idx
+1) & 0xfffffff8) +
2001 ((u64
)(radeon_get_ib_value(p
, idx
+2) & 0xff) << 32);
2003 ib
[idx
+1] = offset
& 0xfffffff8;
2004 ib
[idx
+2] = upper_32_bits(offset
) & 0xff;
	case PACKET3_EVENT_WRITE_EOP:
	{
		u64 offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
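	/*
	 * SET_RESOURCE carries seven-dword descriptors; the type field in the
	 * last dword of each descriptor selects whether it is validated as a
	 * texture (two relocs: base and mip chain) or as a vertex buffer.
	 */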
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				u64 offset64;

				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BASE_UPDATE:
		/* RS780 and RS880 also need this */
		if (p->family < CHIP_RS780) {
			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}
			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
					  offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
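	/*
	 * For COPY_DW, bit 0 of the first payload dword selects a memory
	 * source and bit 1 a memory destination; register operands are
	 * instead range-checked through r600_is_safe_reg().
	 */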
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;

			/* SRC is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;

			/* DST is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
	}
#endif
	return 0;
}
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}
/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the
 * memory used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
			kfree(parser->chunks[i].kpage[0]);
			kfree(parser->chunks[i].kpage[1]);
		}
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
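/*
 * Legacy (UMS) entry point: it builds a temporary parser around the
 * user-supplied chunks, reuses the same r600_cs_parse() checks as the
 * KMS path and returns the validated packets in the caller's IB.
 */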
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.track = track;
	parser.ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib.length_dw = ib_chunk->length_dw;
	*l = parser.ib.length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}
/**
 * r600_dma_cs_next_reloc() - parse next reloc
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	reloc information
 *
 * Return the next reloc, do bo validation and compute
 * GPU offset using the provided start.
 **/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	unsigned idx;

	*cs_reloc = NULL;
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	idx = p->dma_reloc_idx;
	if (idx >= p->nrelocs) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, p->nrelocs);
		return -EINVAL;
	}
	*cs_reloc = p->relocs_ptr[idx];
	p->dma_reloc_idx++;
	return 0;
}
#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
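/*
 * A DMA packet header is a single dword: bits 31:28 encode the command,
 * bit 23 the tiled flag and bits 15:0 the count field, matching the
 * three extraction macros above.
 */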
/**
 * r600_dma_cs_parse() - parse the DMA IB
 * @p:		parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (R6xx-R7xx)
 * Returns 0 for success and an error on failure.
 **/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_cs_reloc *src_reloc, *dst_reloc;
	u32 header, cmd, count, tiled;
	volatile u32 *ib = p->ib.ptr;
	u32 idx, idx_value;
	u64 src_offset, dst_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);

		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			if (tiled) {
				dst_offset = ib[idx+1];
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				p->idx += count + 5;
			} else {
				dst_offset = ib[idx+1];
				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				p->idx += count + 3;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
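		/*
		 * Copy packets come in several layouts: when the header's
		 * tiled bit is set, bit 31 of the third dword picks between
		 * tiled-source and tiled-destination detiling; otherwise both
		 * addresses are linear, with RV770+ using a wider encoding
		 * than R600 for the high address bits.
		 */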
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			if (tiled) {
				idx_value = radeon_get_ib_value(p, idx + 2);
				if (idx_value & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = ib[idx+1];
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

					dst_offset = ib[idx+5];
					dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = ib[idx+5];
					src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

					dst_offset = ib[idx+1];
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				}
				p->idx += 7;
			} else {
				if (p->family >= CHIP_RV770) {
					src_offset = ib[idx+2];
					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
					dst_offset = ib[idx+1];
					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					p->idx += 5;
				} else {
					src_offset = ib[idx+2];
					src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
					dst_offset = ib[idx+1];
					dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
					p->idx += 4;
				}
			}
			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
				return -EINVAL;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			if (p->family < CHIP_RV770) {
				DRM_ERROR("Constant Fill is 7xx only !\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			dst_offset = ib[idx+1];
			dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
	}
#endif
	return 0;
}