/* radeon_state.c -- State support for Radeon -*- linux-c -*-
 *
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *    Kevin E. Martin <martin@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

/* ================================================================
 * Helper functions for client state checking and fixup
 */

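/* Validate a buffer offset supplied by the client.  If it does not
 * already fall in the card-visible range (framebuffer base up to the
 * end of the GART aperture), apply the per-client framebuffer delta
 * and re-check; reject the offset if it still falls outside.
 */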
static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_priv,
						     drm_file_t *filp_priv,
						     u32 *offset ) {
	u32 off = *offset;
	struct drm_radeon_driver_file_fields *radeon_priv;

	if ( off >= dev_priv->fb_location &&
	     off < ( dev_priv->gart_vm_start + dev_priv->gart_size ) )
		return 0;

	radeon_priv = filp_priv->driver_priv;
	off += radeon_priv->radeon_fb_delta;

	DRM_DEBUG( "offset fixed up to 0x%x\n", off );

	if ( off < dev_priv->fb_location ||
	     off >= ( dev_priv->gart_vm_start + dev_priv->gart_size ) )
		return DRM_ERR( EINVAL );

	*offset = off;

	return 0;
}

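/* Check one register-write state packet by its table id.  Packets that
 * carry memory offsets (depth/colour buffer, texture and cubic texture
 * offsets) have each offset validated and fixed up in place; other
 * known ids pass through untouched, and unknown ids are rejected.
 */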
static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
						      drm_file_t *filp_priv,
						      int id,
						      u32 __user *data ) {
	switch ( id ) {

	case RADEON_EMIT_PP_MISC:
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &data[( RADEON_RB3D_DEPTHOFFSET
							    - RADEON_PP_MISC ) / 4] ) ) {
			DRM_ERROR( "Invalid depth buffer offset\n" );
			return DRM_ERR( EINVAL );
		}
		break;

	case RADEON_EMIT_PP_CNTL:
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &data[( RADEON_RB3D_COLOROFFSET
							    - RADEON_PP_CNTL ) / 4] ) ) {
			DRM_ERROR( "Invalid colour buffer offset\n" );
			return DRM_ERR( EINVAL );
		}
		break;

	case R200_EMIT_PP_TXOFFSET_0:
	case R200_EMIT_PP_TXOFFSET_1:
	case R200_EMIT_PP_TXOFFSET_2:
	case R200_EMIT_PP_TXOFFSET_3:
	case R200_EMIT_PP_TXOFFSET_4:
	case R200_EMIT_PP_TXOFFSET_5:
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &data[0] ) ) {
			DRM_ERROR( "Invalid R200 texture offset\n" );
			return DRM_ERR( EINVAL );
		}
		break;

	case RADEON_EMIT_PP_TXFILTER_0:
	case RADEON_EMIT_PP_TXFILTER_1:
	case RADEON_EMIT_PP_TXFILTER_2:
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &data[( RADEON_PP_TXOFFSET_0
							    - RADEON_PP_TXFILTER_0 ) / 4] ) ) {
			DRM_ERROR( "Invalid R100 texture offset\n" );
			return DRM_ERR( EINVAL );
		}
		break;

	case R200_EMIT_PP_CUBIC_OFFSETS_0:
	case R200_EMIT_PP_CUBIC_OFFSETS_1:
	case R200_EMIT_PP_CUBIC_OFFSETS_2:
	case R200_EMIT_PP_CUBIC_OFFSETS_3:
	case R200_EMIT_PP_CUBIC_OFFSETS_4:
	case R200_EMIT_PP_CUBIC_OFFSETS_5: {
		int i;
		for ( i = 0; i < 5; i++ ) {
			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
							    &data[i] ) ) {
				DRM_ERROR( "Invalid R200 cubic texture offset\n" );
				return DRM_ERR( EINVAL );
			}
		}
		break;
	}

	case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T2: {
		int i;
		for ( i = 0; i < 5; i++ ) {
			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
							    &data[i] ) ) {
				DRM_ERROR( "Invalid R100 cubic texture offset\n" );
				return DRM_ERR( EINVAL );
			}
		}
		break;
	}

	case RADEON_EMIT_RB3D_COLORPITCH:
	case RADEON_EMIT_RE_LINE_PATTERN:
	case RADEON_EMIT_SE_LINE_WIDTH:
	case RADEON_EMIT_PP_LUM_MATRIX:
	case RADEON_EMIT_PP_ROT_MATRIX_0:
	case RADEON_EMIT_RB3D_STENCILREFMASK:
	case RADEON_EMIT_SE_VPORT_XSCALE:
	case RADEON_EMIT_SE_CNTL:
	case RADEON_EMIT_SE_CNTL_STATUS:
	case RADEON_EMIT_RE_MISC:
	case RADEON_EMIT_PP_BORDER_COLOR_0:
	case RADEON_EMIT_PP_BORDER_COLOR_1:
	case RADEON_EMIT_PP_BORDER_COLOR_2:
	case RADEON_EMIT_SE_ZBIAS_FACTOR:
	case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
	case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
	case R200_EMIT_PP_TXCBLEND_0:
	case R200_EMIT_PP_TXCBLEND_1:
	case R200_EMIT_PP_TXCBLEND_2:
	case R200_EMIT_PP_TXCBLEND_3:
	case R200_EMIT_PP_TXCBLEND_4:
	case R200_EMIT_PP_TXCBLEND_5:
	case R200_EMIT_PP_TXCBLEND_6:
	case R200_EMIT_PP_TXCBLEND_7:
	case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
	case R200_EMIT_TFACTOR_0:
	case R200_EMIT_VTX_FMT_0:
	case R200_EMIT_VAP_CTL:
	case R200_EMIT_MATRIX_SELECT_0:
	case R200_EMIT_TEX_PROC_CTL_2:
	case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
	case R200_EMIT_PP_TXFILTER_0:
	case R200_EMIT_PP_TXFILTER_1:
	case R200_EMIT_PP_TXFILTER_2:
	case R200_EMIT_PP_TXFILTER_3:
	case R200_EMIT_PP_TXFILTER_4:
	case R200_EMIT_PP_TXFILTER_5:
	case R200_EMIT_VTE_CNTL:
	case R200_EMIT_OUTPUT_VTX_COMP_SEL:
	case R200_EMIT_PP_TAM_DEBUG3:
	case R200_EMIT_PP_CNTL_X:
	case R200_EMIT_RB3D_DEPTHXY_OFFSET:
	case R200_EMIT_RE_AUX_SCISSOR_CNTL:
	case R200_EMIT_RE_SCISSOR_TL_0:
	case R200_EMIT_RE_SCISSOR_TL_1:
	case R200_EMIT_RE_SCISSOR_TL_2:
	case R200_EMIT_SE_VAP_CNTL_STATUS:
	case R200_EMIT_SE_VTX_STATE_CNTL:
	case R200_EMIT_RE_POINTSIZE:
	case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
	case R200_EMIT_PP_CUBIC_FACES_0:
	case R200_EMIT_PP_CUBIC_FACES_1:
	case R200_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_CUBIC_FACES_3:
	case R200_EMIT_PP_CUBIC_FACES_4:
	case R200_EMIT_PP_CUBIC_FACES_5:
	case RADEON_EMIT_PP_TEX_SIZE_0:
	case RADEON_EMIT_PP_TEX_SIZE_1:
	case RADEON_EMIT_PP_TEX_SIZE_2:
	case R200_EMIT_RB3D_BLENDCOLOR:
	case R200_EMIT_TCL_POINT_SPRITE_CNTL:
	case RADEON_EMIT_PP_CUBIC_FACES_0:
	case RADEON_EMIT_PP_CUBIC_FACES_1:
	case RADEON_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_TRI_PERF_CNTL:
	case R200_EMIT_PP_AFS_0:
	case R200_EMIT_PP_AFS_1:
	case R200_EMIT_ATF_TFACTOR:
	case R200_EMIT_PP_TXCTLALL_0:
	case R200_EMIT_PP_TXCTLALL_1:
	case R200_EMIT_PP_TXCTLALL_2:
	case R200_EMIT_PP_TXCTLALL_3:
	case R200_EMIT_PP_TXCTLALL_4:
	case R200_EMIT_PP_TXCTLALL_5:
		/* These packets don't contain memory offsets */
		break;

	default:
		DRM_ERROR( "Unknown state packet ID %d\n", id );
		return DRM_ERR( EINVAL );
	}

	return 0;
}

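/* Check a single type-3 packet from a client command buffer: verify the
 * header really is a type-3 packet, compute its size from the count
 * field, make sure it fits in the data the client supplied, and
 * validate/fix up the src/dst pitch-offset dwords of blits that carry a
 * GUI_CNTL dword.
 */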
static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_priv,
						      drm_file_t *filp_priv,
						      drm_radeon_cmd_buffer_t *cmdbuf,
						      unsigned int *cmdsz ) {
	u32 *cmd = (u32 *) cmdbuf->buf;

	*cmdsz = 2 + ( ( cmd[0] & RADEON_CP_PACKET_COUNT_MASK ) >> 16 );

	if ( ( cmd[0] & 0xc0000000 ) != RADEON_CP_PACKET3 ) {
		DRM_ERROR( "Not a type 3 packet\n" );
		return DRM_ERR( EINVAL );
	}

	if ( 4 * *cmdsz > cmdbuf->bufsz ) {
		DRM_ERROR( "Packet size larger than size of data provided\n" );
		return DRM_ERR( EINVAL );
	}

	/* Check client state and fix it up if necessary */
	if ( cmd[0] & 0x8000 ) { /* MSB of opcode: next DWORD GUI_CNTL */
		u32 offset;

		if ( cmd[1] & ( RADEON_GMC_SRC_PITCH_OFFSET_CNTL
				| RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
			offset = cmd[2] << 10;
			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
				DRM_ERROR( "Invalid first packet offset\n" );
				return DRM_ERR( EINVAL );
			}
			cmd[2] = ( cmd[2] & 0xffc00000 ) | offset >> 10;
		}

		if ( ( cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL ) &&
		     ( cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL ) ) {
			offset = cmd[3] << 10;
			if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
				DRM_ERROR( "Invalid second packet offset\n" );
				return DRM_ERR( EINVAL );
			}
			cmd[3] = ( cmd[3] & 0xffc00000 ) | offset >> 10;
		}
	}

	return 0;
}


/* ================================================================
 * CP hardware state programming functions
 */

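/* Emit a single clip rectangle.  RADEON_RE_TOP_LEFT takes the top-left
 * corner; judging by the -1 on x2 and y2, RADEON_RE_WIDTH_HEIGHT takes
 * the inclusive bottom-right corner rather than a width/height pair.
 */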
static __inline__ void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv,
					      drm_clip_rect_t *box )
{
	RING_LOCALS;

	DRM_DEBUG( "   box:  x1=%d y1=%d  x2=%d y2=%d\n",
		   box->x1, box->y1, box->x2, box->y2 );

	BEGIN_RING( 4 );
	OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) );
	OUT_RING( (box->y1 << 16) | box->x1 );
	OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) );
	OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) );
	ADVANCE_RING();
}

/* Emit 1.1 state
 */
static int radeon_emit_state( drm_radeon_private_t *dev_priv,
			      drm_file_t *filp_priv,
			      drm_radeon_context_regs_t *ctx,
			      drm_radeon_texture_regs_t *tex,
			      unsigned int dirty )
{
	RING_LOCALS;
	DRM_DEBUG( "dirty=0x%08x\n", dirty );

	if ( dirty & RADEON_UPLOAD_CONTEXT ) {
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &ctx->rb3d_depthoffset ) ) {
			DRM_ERROR( "Invalid depth buffer offset\n" );
			return DRM_ERR( EINVAL );
		}

		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &ctx->rb3d_coloroffset ) ) {
			DRM_ERROR( "Invalid colour buffer offset\n" );
			return DRM_ERR( EINVAL );
		}

		BEGIN_RING( 14 );
		OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) );
		OUT_RING( ctx->pp_misc );
		OUT_RING( ctx->pp_fog_color );
		OUT_RING( ctx->re_solid_color );
		OUT_RING( ctx->rb3d_blendcntl );
		OUT_RING( ctx->rb3d_depthoffset );
		OUT_RING( ctx->rb3d_depthpitch );
		OUT_RING( ctx->rb3d_zstencilcntl );
		OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) );
		OUT_RING( ctx->pp_cntl );
		OUT_RING( ctx->rb3d_cntl );
		OUT_RING( ctx->rb3d_coloroffset );
		OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) );
		OUT_RING( ctx->rb3d_colorpitch );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_VERTFMT ) {
		BEGIN_RING( 2 );
		OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) );
		OUT_RING( ctx->se_coord_fmt );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_LINE ) {
		BEGIN_RING( 5 );
		OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) );
		OUT_RING( ctx->re_line_pattern );
		OUT_RING( ctx->re_line_state );
		OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) );
		OUT_RING( ctx->se_line_width );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_BUMPMAP ) {
		BEGIN_RING( 5 );
		OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) );
		OUT_RING( ctx->pp_lum_matrix );
		OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) );
		OUT_RING( ctx->pp_rot_matrix_0 );
		OUT_RING( ctx->pp_rot_matrix_1 );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_MASKS ) {
		BEGIN_RING( 4 );
		OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) );
		OUT_RING( ctx->rb3d_stencilrefmask );
		OUT_RING( ctx->rb3d_ropcntl );
		OUT_RING( ctx->rb3d_planemask );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_VIEWPORT ) {
		BEGIN_RING( 7 );
		OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) );
		OUT_RING( ctx->se_vport_xscale );
		OUT_RING( ctx->se_vport_xoffset );
		OUT_RING( ctx->se_vport_yscale );
		OUT_RING( ctx->se_vport_yoffset );
		OUT_RING( ctx->se_vport_zscale );
		OUT_RING( ctx->se_vport_zoffset );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_SETUP ) {
		BEGIN_RING( 4 );
		OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
		OUT_RING( ctx->se_cntl );
		OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) );
		OUT_RING( ctx->se_cntl_status );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_MISC ) {
		BEGIN_RING( 2 );
		OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) );
		OUT_RING( ctx->re_misc );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_TEX0 ) {
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &tex[0].pp_txoffset ) ) {
			DRM_ERROR( "Invalid texture offset for unit 0\n" );
			return DRM_ERR( EINVAL );
		}

		BEGIN_RING( 9 );
		OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) );
		OUT_RING( tex[0].pp_txfilter );
		OUT_RING( tex[0].pp_txformat );
		OUT_RING( tex[0].pp_txoffset );
		OUT_RING( tex[0].pp_txcblend );
		OUT_RING( tex[0].pp_txablend );
		OUT_RING( tex[0].pp_tfactor );
		OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) );
		OUT_RING( tex[0].pp_border_color );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_TEX1 ) {
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &tex[1].pp_txoffset ) ) {
			DRM_ERROR( "Invalid texture offset for unit 1\n" );
			return DRM_ERR( EINVAL );
		}

		BEGIN_RING( 9 );
		OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) );
		OUT_RING( tex[1].pp_txfilter );
		OUT_RING( tex[1].pp_txformat );
		OUT_RING( tex[1].pp_txoffset );
		OUT_RING( tex[1].pp_txcblend );
		OUT_RING( tex[1].pp_txablend );
		OUT_RING( tex[1].pp_tfactor );
		OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) );
		OUT_RING( tex[1].pp_border_color );
		ADVANCE_RING();
	}

	if ( dirty & RADEON_UPLOAD_TEX2 ) {
		if ( radeon_check_and_fixup_offset( dev_priv, filp_priv,
						    &tex[2].pp_txoffset ) ) {
			DRM_ERROR( "Invalid texture offset for unit 2\n" );
			return DRM_ERR( EINVAL );
		}

		BEGIN_RING( 9 );
		OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) );
		OUT_RING( tex[2].pp_txfilter );
		OUT_RING( tex[2].pp_txformat );
		OUT_RING( tex[2].pp_txoffset );
		OUT_RING( tex[2].pp_txcblend );
		OUT_RING( tex[2].pp_txablend );
		OUT_RING( tex[2].pp_tfactor );
		OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) );
		OUT_RING( tex[2].pp_border_color );
		ADVANCE_RING();
	}

	return 0;
}

/* Emit 1.2 state
 */
static int radeon_emit_state2( drm_radeon_private_t *dev_priv,
			       drm_file_t *filp_priv,
			       drm_radeon_state_t *state )
{
	RING_LOCALS;

	if (state->dirty & RADEON_UPLOAD_ZBIAS) {
		BEGIN_RING( 3 );
		OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) );
		OUT_RING( state->context2.se_zbias_factor );
		OUT_RING( state->context2.se_zbias_constant );
		ADVANCE_RING();
	}

	return radeon_emit_state( dev_priv, filp_priv, &state->context,
				  state->tex, state->dirty );
}

/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
 * 1.3 cmdbuffers allow all previous state to be updated as well as
 * the tcl scalar and vector areas.
 */
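/* Each entry below describes one state block: the first register in the
 * block, the number of dwords it occupies, and a name used in debug
 * output.  The table is indexed by the RADEON_EMIT_xxx / R200_EMIT_xxx
 * packet ids checked in radeon_check_and_fixup_packets() above; for
 * instance, the first entry { RADEON_PP_MISC, 7, ... } covers the same
 * seven registers that radeon_emit_state() programs by hand with
 * CP_PACKET0( RADEON_PP_MISC, 6 ).
 */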
static struct {
	int start;
	int len;
	const char *name;
} packet[RADEON_MAX_STATE_PACKETS] = {
	{ RADEON_PP_MISC, 7, "RADEON_PP_MISC" },
	{ RADEON_PP_CNTL, 3, "RADEON_PP_CNTL" },
	{ RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH" },
	{ RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN" },
	{ RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH" },
	{ RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX" },
	{ RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0" },
	{ RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK" },
	{ RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE" },
	{ RADEON_SE_CNTL, 2, "RADEON_SE_CNTL" },
	{ RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS" },
	{ RADEON_RE_MISC, 1, "RADEON_RE_MISC" },
	{ RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0" },
	{ RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0" },
	{ RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1" },
	{ RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1" },
	{ RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2" },
	{ RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2" },
	{ RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR" },
	{ RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT" },
	{ RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17, "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" },
	{ R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" },
	{ R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" },
	{ R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" },
	{ R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" },
	{ R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" },
	{ R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" },
	{ R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" },
	{ R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" },
	{ R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" },
	{ R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" },
	{ R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" },
	{ R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" },
	{ R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" },
	{ R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" },
	{ R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" },
	{ R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" },
	{ R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" },
	{ R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" },
	{ R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" },
	{ R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" },
	{ R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" },
	{ R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" },
	{ R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" },
	{ R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" },
	{ R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" },
	{ R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" },
	{ R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" },
	{ R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" },
	{ R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" },
	{ R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" },
	{ R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" },
	{ R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" },
	{ R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" },
	{ R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" },
	{ R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" },
	{ R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" },
	{ R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" },
	{ R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" },
	{ R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" },
	{ R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" },
	{ R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */
	{ R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */
	{ R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" },
	{ R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" },
	{ R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" },
	{ R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" },
	{ R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" },
	{ R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" },
	{ R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" },
	{ R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" },
	{ R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" },
	{ R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" },
	{ RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0" },
	{ RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1" },
	{ RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2" },
	{ R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR" },
	{ R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL" },
	{ RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0" },
	{ RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0" },
	{ RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1" },
	{ RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0" },
	{ RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2" },
	{ RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0" },
	{ R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF" },
	{ R200_PP_AFS_0, 32, "R200_PP_AFS_0" }, /* 85 */
	{ R200_PP_AFS_1, 32, "R200_PP_AFS_1" },
	{ R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR" },
	{ R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0" },
	{ R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1" },
	{ R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2" },
	{ R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3" },
	{ R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4" },
	{ R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5" },
};



/* ================================================================
 * Performance monitoring functions
 */

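/* Fill a small box with a solid colour, used to draw the performance
 * boxes below.  Coordinates are relative to the first cliprect, and the
 * fill goes to whichever buffer is currently being drawn to (the back
 * buffer, or the front buffer when the pages are flipped).
 */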
static void radeon_clear_box( drm_radeon_private_t *dev_priv,
			      int x, int y, int w, int h,
			      int r, int g, int b )
{
	u32 color;
	RING_LOCALS;

	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;

	switch ( dev_priv->color_fmt ) {
	case RADEON_COLOR_FORMAT_RGB565:
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) |
			 ((b & 0xf8) >> 3));
		break;
	case RADEON_COLOR_FORMAT_ARGB8888:
	default:
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	}

	BEGIN_RING( 4 );
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
	OUT_RING( 0xffffffff );
	ADVANCE_RING();

	BEGIN_RING( 6 );

	OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
	OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		  RADEON_GMC_BRUSH_SOLID_COLOR |
		  (dev_priv->color_fmt << 8) |
		  RADEON_GMC_SRC_DATATYPE_COLOR |
		  RADEON_ROP3_P |
		  RADEON_GMC_CLR_CMP_CNTL_DIS );

	if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
		OUT_RING( dev_priv->front_pitch_offset );
	} else {
		OUT_RING( dev_priv->back_pitch_offset );
	}

	OUT_RING( color );

	OUT_RING( (x << 16) | y );
	OUT_RING( (w << 16) | h );

	ADVANCE_RING();
}

static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
{
	/* Collapse various things into a wait flag -- trying to
	 * guess if userspace slept -- better just to have them tell us.
	 */
	if (dev_priv->stats.last_frame_reads > 1 ||
	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	if (dev_priv->stats.freelist_loops) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	/* Purple box for page flipping
	 */
	if ( dev_priv->stats.boxes & RADEON_BOX_FLIP )
		radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 );

	/* Red box if we have to wait for idle at any point
	 */
	if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE )
		radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 );

	/* Blue box: lost context?
	 */

	/* Yellow box for texture swaps
	 */
	if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD )
		radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 );

	/* Green box if hardware never idles (as far as we can tell)
	 */
	if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) )
		radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );


	/* Draw bars indicating number of buffers allocated
	 * (not a great measure, easily confused)
	 */
	if (dev_priv->stats.requested_bufs) {
		if (dev_priv->stats.requested_bufs > 100)
			dev_priv->stats.requested_bufs = 100;

		radeon_clear_box( dev_priv, 4, 16,
				  dev_priv->stats.requested_bufs, 4,
				  196, 128, 128 );
	}

	memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) );

}
/* ================================================================
 * CP command dispatch functions
 */

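/* Clear the requested buffers: front/back colour clears are done with
 * 2D solid fills, while depth/stencil clears render quads through the
 * 3D engine (with an optional hyper-z fast path on chips that support
 * it).  Ends by emitting a new clear age so clients can throttle.
 */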
static void radeon_cp_dispatch_clear( drm_device_t *dev,
				      drm_radeon_clear_t *clear,
				      drm_radeon_clear_rect_t *depth_boxes )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "flags = 0x%x\n", flags );

	dev_priv->stats.clears++;

	if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
		unsigned int tmp = flags;

		flags &= ~(RADEON_FRONT | RADEON_BACK);
		if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK;
		if ( tmp & RADEON_BACK )  flags |= RADEON_FRONT;
	}

	if ( flags & (RADEON_FRONT | RADEON_BACK) ) {

		BEGIN_RING( 4 );

		/* Ensure the 3D stream is idle before doing a
		 * 2D fill to clear the front or back buffer.
		 */
		RADEON_WAIT_UNTIL_3D_IDLE();

		OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
		OUT_RING( clear->color_mask );

		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		for ( i = 0 ; i < nbox ; i++ ) {
			int x = pbox[i].x1;
			int y = pbox[i].y1;
			int w = pbox[i].x2 - x;
			int h = pbox[i].y2 - y;

			DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
				   x, y, w, h, flags );

			if ( flags & RADEON_FRONT ) {
				BEGIN_RING( 6 );

				OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
				OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					  RADEON_GMC_BRUSH_SOLID_COLOR |
					  (dev_priv->color_fmt << 8) |
					  RADEON_GMC_SRC_DATATYPE_COLOR |
					  RADEON_ROP3_P |
					  RADEON_GMC_CLR_CMP_CNTL_DIS );

				OUT_RING( dev_priv->front_pitch_offset );
				OUT_RING( clear->clear_color );

				OUT_RING( (x << 16) | y );
				OUT_RING( (w << 16) | h );

				ADVANCE_RING();
			}

			if ( flags & RADEON_BACK ) {
				BEGIN_RING( 6 );

				OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
				OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					  RADEON_GMC_BRUSH_SOLID_COLOR |
					  (dev_priv->color_fmt << 8) |
					  RADEON_GMC_SRC_DATATYPE_COLOR |
					  RADEON_ROP3_P |
					  RADEON_GMC_CLR_CMP_CNTL_DIS );

				OUT_RING( dev_priv->back_pitch_offset );
				OUT_RING( clear->clear_color );

				OUT_RING( (x << 16) | y );
				OUT_RING( (w << 16) | h );

				ADVANCE_RING();
			}
		}
	}

	/* hyper z clear */
	/* no docs available, based on reverse engineering by Stephane Marchesin */
	if ((flags & (RADEON_DEPTH | RADEON_STENCIL)) && (flags & RADEON_CLEAR_FASTZ)) {

		int i;
		int depthpixperline = dev_priv->depth_fmt == RADEON_DEPTH_FORMAT_16BIT_INT_Z ?
			(dev_priv->depth_pitch / 2) : (dev_priv->depth_pitch / 4);

		u32 clearmask;

		u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
			((clear->depth_mask & 0xff) << 24);


		/* Make sure we restore the 3D state next time.
		 * we haven't touched any "normal" state - still need this?
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		if ((dev_priv->flags & CHIP_HAS_HIERZ) && (flags & RADEON_USE_HIERZ)) {
			/* FIXME : reverse engineer that for Rx00 cards */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
			/* pattern seems to work for r100, though get slight
			   rendering errors with glxgears. If hierz is not enabled for r100,
			   only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
			   other ones are ignored, and the same clear mask can be used. That's
			   very different behaviour than R200 which needs different clear mask
			   and different number of tiles to clear if hierz is enabled or not !?!
			*/
			clearmask = (0xff<<22)|(0xff<<6)| 0x003f003f;
		}
		else {
			/* clear mask : chooses the clearing pattern.
			   rv250: could be used to clear only parts of macrotiles
			   (but that would get really complicated...)?
			   bit 0 and 1 (either or both of them ?!?!) are used to
			   not clear tile (or maybe one of the bits indicates if the tile is
			   compressed or not), bit 2 and 3 to not clear tile 1,...,.
			   Pattern is as follows:
				| 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
			   bits -------------------------------------------------
				| 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
			   rv100: clearmask covers 2x8 4x1 tiles, but one clear still
			   covers 256 pixels ?!?
			*/
			clearmask = 0x0;
		}

		BEGIN_RING( 8 );
		RADEON_WAIT_UNTIL_2D_IDLE();
		OUT_RING_REG( RADEON_RB3D_DEPTHCLEARVALUE,
			      tempRB3D_DEPTHCLEARVALUE);
		/* what offset is this exactly ? */
		OUT_RING_REG( RADEON_RB3D_ZMASKOFFSET, 0 );
		/* need ctlstat, otherwise get some strange black flickering */
		OUT_RING_REG( RADEON_RB3D_ZCACHE_CTLSTAT, RADEON_RB3D_ZC_FLUSH_ALL );
		ADVANCE_RING();

		for (i = 0; i < nbox; i++) {
			int tileoffset, nrtilesx, nrtilesy, j;
			/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
			if ((dev_priv->flags & CHIP_HAS_HIERZ) && !(dev_priv->microcode_version == UCODE_R200)) {
				/* FIXME : figure this out for r200 (when hierz is enabled). Or
				   maybe r200 actually doesn't need to put the low-res z value into
				   the tile cache like r100, but just needs to clear the hi-level z-buffer?
				   Works for R100, both with hierz and without.
				   R100 seems to operate on 2x1 8x8 tiles, but...
				   odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
				   problematic with resolutions which are not 64 pix aligned? */
				tileoffset = ((pbox[i].y1 >> 3) * depthpixperline + pbox[i].x1) >> 6;
				nrtilesx = ((pbox[i].x2 & ~63) - (pbox[i].x1 & ~63)) >> 4;
				nrtilesy = (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING( 4 );
					OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
					/* first tile */
					OUT_RING( tileoffset * 8 );
					/* the number of tiles to clear */
					OUT_RING( nrtilesx + 4 );
					/* clear mask : chooses the clearing pattern. */
					OUT_RING( clearmask );
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			}
			else if (dev_priv->microcode_version == UCODE_R200) {
				/* works for rv250. */
				/* find first macro tile (8x2 4x4 z-pixels on rv250) */
				tileoffset = ((pbox[i].y1 >> 3) * depthpixperline + pbox[i].x1) >> 5;
				nrtilesx = (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
				nrtilesy = (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING( 4 );
					OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
					/* first tile */
					/* judging by the first tile offset needed, could possibly
					   directly address/clear 4x4 tiles instead of 8x2 * 4x4
					   macro tiles, though would still need clear mask for
					   right/bottom if truly 4x4 granularity is desired ? */
					OUT_RING( tileoffset * 16 );
					/* the number of tiles to clear */
					OUT_RING( nrtilesx + 1 );
					/* clear mask : chooses the clearing pattern. */
					OUT_RING( clearmask );
					ADVANCE_RING();
					tileoffset += depthpixperline >> 5;
				}
			}
			else { /* rv 100 */
				/* rv100 might not need 64 pix alignment, who knows */
				/* offsets are, hmm, weird */
				tileoffset = ((pbox[i].y1 >> 4) * depthpixperline + pbox[i].x1) >> 6;
				nrtilesx = ((pbox[i].x2 & ~63) - (pbox[i].x1 & ~63)) >> 4;
				nrtilesy = (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING( 4 );
					OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
					OUT_RING( tileoffset * 128 );
					/* the number of tiles to clear */
					OUT_RING( nrtilesx + 4 );
					/* clear mask : chooses the clearing pattern. */
					OUT_RING( clearmask );
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			}
		}

		/* TODO don't always clear all hi-level z tiles */
		if ((dev_priv->flags & CHIP_HAS_HIERZ) && (dev_priv->microcode_version == UCODE_R200)
		    && (flags & RADEON_USE_HIERZ))
		/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
		/* FIXME : the mask supposedly contains low-res z values. So can't set
		   just to the max (0xff? or actually 0x3fff?), need to take z clear
		   value into account? */
		{
			BEGIN_RING( 4 );
			OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_HIZ, 2 ) );
			OUT_RING( 0x0 ); /* First tile */
			OUT_RING( 0x3cc0 );
			OUT_RING( (0xff<<22)|(0xff<<6)| 0x003f003f);
			ADVANCE_RING();
		}
	}

	/* We have to clear the depth and/or stencil buffers by
	 * rendering a quad into just those buffers.  Thus, we have to
	 * make sure the 3D engine is configured correctly.
	 */
	if ((dev_priv->microcode_version == UCODE_R200) &&
	    (flags & (RADEON_DEPTH | RADEON_STENCIL))) {

		int tempPP_CNTL;
		int tempRE_CNTL;
		int tempRB3D_CNTL;
		int tempRB3D_ZSTENCILCNTL;
		int tempRB3D_STENCILREFMASK;
		int tempRB3D_PLANEMASK;
		int tempSE_CNTL;
		int tempSE_VTE_CNTL;
		int tempSE_VTX_FMT_0;
		int tempSE_VTX_FMT_1;
		int tempSE_VAP_CNTL;
		int tempRE_AUX_SCISSOR_CNTL;

		tempPP_CNTL = 0;
		tempRE_CNTL = 0;

		tempRB3D_CNTL = depth_clear->rb3d_cntl;

		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
		tempRB3D_STENCILREFMASK = 0x0;

		tempSE_CNTL = depth_clear->se_cntl;



		/* Disable TCL */

		tempSE_VAP_CNTL = (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
				   (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));

		tempRB3D_PLANEMASK = 0x0;

		tempRE_AUX_SCISSOR_CNTL = 0x0;

		tempSE_VTE_CNTL =
			SE_VTE_CNTL__VTX_XY_FMT_MASK |
			SE_VTE_CNTL__VTX_Z_FMT_MASK;

		/* Vertex format (X, Y, Z, W)*/
		tempSE_VTX_FMT_0 =
			SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
			SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
		tempSE_VTX_FMT_1 = 0x0;


		/*
		 * Depth buffer specific enables
		 */
		if (flags & RADEON_DEPTH) {
			/* Enable depth buffer */
			tempRB3D_CNTL |= RADEON_Z_ENABLE;
		} else {
			/* Disable depth buffer */
			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
		}

		/*
		 * Stencil buffer specific enables
		 */
		if ( flags & RADEON_STENCIL ) {
			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = clear->depth_mask;
		} else {
			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
				RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}

		BEGIN_RING( 26 );
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING_REG( RADEON_PP_CNTL, tempPP_CNTL );
		OUT_RING_REG( R200_RE_CNTL, tempRE_CNTL );
		OUT_RING_REG( RADEON_RB3D_CNTL, tempRB3D_CNTL );
		OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
			      tempRB3D_ZSTENCILCNTL );
		OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
			      tempRB3D_STENCILREFMASK );
		OUT_RING_REG( RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK );
		OUT_RING_REG( RADEON_SE_CNTL, tempSE_CNTL );
		OUT_RING_REG( R200_SE_VTE_CNTL, tempSE_VTE_CNTL );
		OUT_RING_REG( R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0 );
		OUT_RING_REG( R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1 );
		OUT_RING_REG( R200_SE_VAP_CNTL, tempSE_VAP_CNTL );
		OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL,
			      tempRE_AUX_SCISSOR_CNTL );
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		for ( i = 0 ; i < nbox ; i++ ) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );

			BEGIN_RING( 14 );
			OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 12 ) );
			OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
				   RADEON_PRIM_WALK_RING |
				   (3 << RADEON_NUM_VERTICES_SHIFT)) );
			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x3f800000 );
			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x3f800000 );
			OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x3f800000 );
			ADVANCE_RING();
		}
	}
	else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {

		int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;

		rb3d_cntl = depth_clear->rb3d_cntl;

		if ( flags & RADEON_DEPTH ) {
			rb3d_cntl |=  RADEON_Z_ENABLE;
		} else {
			rb3d_cntl &= ~RADEON_Z_ENABLE;
		}

		if ( flags & RADEON_STENCIL ) {
			rb3d_cntl |=  RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */
		} else {
			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
				RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}

		BEGIN_RING( 13 );
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) );
		OUT_RING( 0x00000000 );
		OUT_RING( rb3d_cntl );

		OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL );
		OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
			      rb3d_stencilrefmask );
		OUT_RING_REG( RADEON_RB3D_PLANEMASK,
			      0x00000000 );
		OUT_RING_REG( RADEON_SE_CNTL,
			      depth_clear->se_cntl );
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		for ( i = 0 ; i < nbox ; i++ ) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );

			BEGIN_RING( 15 );

			OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 13 ) );
			OUT_RING( RADEON_VTX_Z_PRESENT |
				  RADEON_VTX_PKCOLOR_PRESENT);
			OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
				   RADEON_PRIM_WALK_RING |
				   RADEON_MAOS_ENABLE |
				   RADEON_VTX_FMT_RADEON_MODE |
				   (3 << RADEON_NUM_VERTICES_SHIFT)) );


			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x0 );

			OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x0 );

			OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
			OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
			OUT_RING( 0x0 );

			ADVANCE_RING();
		}
	}

	/* Increment the clear counter.  The client-side 3D driver must
	 * wait on this value before performing the clear ioctl.  We
	 * need this because the card's so damned fast...
	 */
	dev_priv->sarea_priv->last_clear++;

	BEGIN_RING( 4 );

	RADEON_CLEAR_AGE( dev_priv->sarea_priv->last_clear );
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
}

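/* Blit the back buffer to the front buffer, one bitblt per cliprect,
 * then bump the frame age that clients use to throttle the framerate.
 */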
static void radeon_cp_dispatch_swap( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes)
		radeon_cp_performance_boxes( dev_priv );


	/* Wait for the 3D stream to idle before dispatching the bitblt.
	 * This will prevent data corruption between the two streams.
	 */
	BEGIN_RING( 2 );

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	for ( i = 0 ; i < nbox ; i++ ) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG( "dispatch swap %d,%d-%d,%d\n",
			   x, y, w, h );

		BEGIN_RING( 7 );

		OUT_RING( CP_PACKET3( RADEON_CNTL_BITBLT_MULTI, 5 ) );
		OUT_RING( RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			  RADEON_GMC_BRUSH_NONE |
			  (dev_priv->color_fmt << 8) |
			  RADEON_GMC_SRC_DATATYPE_COLOR |
			  RADEON_ROP3_S |
			  RADEON_DP_SRC_SOURCE_MEMORY |
			  RADEON_GMC_CLR_CMP_CNTL_DIS |
			  RADEON_GMC_WR_MSK_DIS );

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING( dev_priv->back_pitch_offset );
			OUT_RING( dev_priv->front_pitch_offset );
		}
		else {
			OUT_RING( dev_priv->front_pitch_offset );
			OUT_RING( dev_priv->back_pitch_offset );
		}

		OUT_RING( (x << 16) | y );
		OUT_RING( (x << 16) | y );
		OUT_RING( (w << 16) | h );

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING( 4 );

	RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
	RADEON_WAIT_UNTIL_2D_IDLE();

	ADVANCE_RING();
}

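/* Perform a page flip: repoint both CRTCs at the other buffer, then
 * toggle current_page/pfCurrentPage and bump the frame age so clients
 * can throttle.
 */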
static void radeon_cp_dispatch_flip( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle;
	int offset = (dev_priv->current_page == 1)
		   ? dev_priv->front_offset : dev_priv->back_offset;
	RING_LOCALS;
	DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
		   __FUNCTION__,
		   dev_priv->current_page,
		   dev_priv->sarea_priv->pfCurrentPage);

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes) {
		dev_priv->stats.boxes |= RADEON_BOX_FLIP;
		radeon_cp_performance_boxes( dev_priv );
	}

	/* Update the frame offsets for both CRTCs
	 */
	BEGIN_RING( 6 );

	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch
					      + sarea->frame.x
					      * ( dev_priv->color_fmt - 2 ) ) & ~7 )
					  + offset );
	OUT_RING_REG( RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
					   + offset );

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
					      1 - dev_priv->current_page;

	BEGIN_RING( 2 );

	RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );

	ADVANCE_RING();
}

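/* Returns non-zero if the vertex count is invalid for the primitive
 * type: lines need an even non-zero count, triangle/rect lists a
 * non-zero multiple of three, and strips and fans their minimum counts.
 */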
static int bad_prim_vertex_nr( int primitive, int nr )
{
	switch (primitive & RADEON_PRIM_TYPE_MASK) {
	case RADEON_PRIM_TYPE_NONE:
	case RADEON_PRIM_TYPE_POINT:
		return nr < 1;
	case RADEON_PRIM_TYPE_LINE:
		return (nr & 1) || nr == 0;
	case RADEON_PRIM_TYPE_LINE_STRIP:
		return nr < 2;
	case RADEON_PRIM_TYPE_TRI_LIST:
	case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
	case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
	case RADEON_PRIM_TYPE_RECT_LIST:
		return nr % 3 || nr == 0;
	case RADEON_PRIM_TYPE_TRI_FAN:
	case RADEON_PRIM_TYPE_TRI_STRIP:
		return nr < 3;
	default:
		return 1;
	}
}


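/* Describes one primitive to be rendered from a vertex or index buffer:
 * start and finish are byte offsets into the buffer, offset is added to
 * the GART buffer base to locate the vertex data, and vc_format is the
 * vertex format word emitted to the hardware.
 */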
typedef struct {
	unsigned int start;
	unsigned int finish;
	unsigned int prim;
	unsigned int numverts;
	unsigned int offset;
	unsigned int vc_format;
} drm_radeon_tcl_prim_t;

static void radeon_cp_dispatch_vertex( drm_device_t *dev,
				       drm_buf_t *buf,
				       drm_radeon_tcl_prim_t *prim )

{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
	int numverts = (int)prim->numverts;
	int nbox = sarea_priv->nbox;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
		  prim->prim,
		  prim->vc_format,
		  prim->start,
		  prim->finish,
		  prim->numverts);

	if (bad_prim_vertex_nr( prim->prim, prim->numverts )) {
		DRM_ERROR( "bad prim %x numverts %d\n",
			   prim->prim, prim->numverts );
		return;
	}

	do {
		/* Emit the next cliprect */
		if ( i < nbox ) {
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );
		}

		/* Emit the vertex buffer rendering commands */
		BEGIN_RING( 5 );

		OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
		OUT_RING( offset );
		OUT_RING( numverts );
		OUT_RING( prim->vc_format );
		OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST |
			  RADEON_COLOR_ORDER_RGBA |
			  RADEON_VTX_FMT_RADEON_MODE |
			  (numverts << RADEON_NUM_VERTICES_SHIFT) );

		ADVANCE_RING();

		i++;
	} while ( i < nbox );
}



static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;

	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;

	/* Emit the vertex buffer age */
	BEGIN_RING( 2 );
	RADEON_DISPATCH_AGE( buf_priv->age );
	ADVANCE_RING();

	buf->pending = 1;
	buf->used = 0;
}

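/* Dispatch a range of an indirect buffer to the CP.  The hardware wants
 * an even number of dwords, so odd-sized ranges are padded with a
 * type-2 NOP packet before the buffer is fired off.
 */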
static void radeon_cp_dispatch_indirect( drm_device_t *dev,
					 drm_buf_t *buf,
					 int start, int end )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
		   buf->idx, start, end );

	if ( start != end ) {
		int offset = (dev_priv->gart_buffers_offset
			      + buf->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CP packet.
		 */
		if ( dwords & 1 ) {
			u32 *data = (u32 *)
				((char *)dev->agp_buffer_map->handle
				 + buf->offset + start);
			data[dwords++] = RADEON_CP_PACKET2;
		}

		/* Fire off the indirect buffer */
		BEGIN_RING( 3 );

		OUT_RING( CP_PACKET0( RADEON_CP_IB_BASE, 1 ) );
		OUT_RING( offset );
		OUT_RING( dwords );

		ADVANCE_RING();
	}
}


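/* Dispatch an indexed primitive: the RNDR_GEN_INDX_PRIM header, vertex
 * base and index count are patched directly into the element buffer,
 * which is then replayed once per cliprect through
 * radeon_cp_dispatch_indirect().
 */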
static void radeon_cp_dispatch_indices( drm_device_t *dev,
					drm_buf_t *elt_buf,
					drm_radeon_tcl_prim_t *prim )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + prim->offset;
	u32 *data;
	int dwords;
	int i = 0;
	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
	int count = (prim->finish - start) / sizeof(u16);
	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
		  prim->prim,
		  prim->vc_format,
		  prim->start,
		  prim->finish,
		  prim->offset,
		  prim->numverts);

	if (bad_prim_vertex_nr( prim->prim, count )) {
		DRM_ERROR( "bad prim %x count %d\n",
			   prim->prim, count );
		return;
	}


	if ( start >= prim->finish ||
	     (prim->start & 0x7) ) {
		DRM_ERROR( "buffer prim %d\n", prim->prim );
		return;
	}

	dwords = (prim->finish - prim->start + 3) / sizeof(u32);

	data = (u32 *)((char *)dev->agp_buffer_map->handle +
		       elt_buf->offset + prim->start);

	data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
	data[1] = offset;
	data[2] = prim->numverts;
	data[3] = prim->vc_format;
	data[4] = (prim->prim |
		   RADEON_PRIM_WALK_IND |
		   RADEON_COLOR_ORDER_RGBA |
		   RADEON_VTX_FMT_RADEON_MODE |
		   (count << RADEON_NUM_VERTICES_SHIFT) );

	do {
		if ( i < nbox )
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );

		radeon_cp_dispatch_indirect( dev, elt_buf,
					     prim->start,
					     prim->finish );

		i++;
	} while ( i < nbox );

}

#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE

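/* Upload a texture image by staging it through a blit buffer.  Uploads
 * larger than RADEON_MAX_TEXTURE_SIZE are split into multiple passes,
 * and narrow or micro-tiled images are repacked by hand because the
 * blitter's minimum pitch is 64 bytes.
 */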
1516 static int radeon_cp_dispatch_texture( DRMFILE filp,
1517 drm_device_t *dev,
1518 drm_radeon_texture_t *tex,
1519 drm_radeon_tex_image_t *image )
1520 {
1521 drm_radeon_private_t *dev_priv = dev->dev_private;
1522 drm_file_t *filp_priv;
1523 drm_buf_t *buf;
1524 u32 format;
1525 u32 *buffer;
1526 const u8 __user *data;
1527 int size, dwords, tex_width, blit_width, spitch;
1528 u32 height;
1529 int i;
1530 u32 texpitch, microtile;
1531 u32 offset;
1532 RING_LOCALS;
1533
1534 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
1535
1536 if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &tex->offset ) ) {
1537 DRM_ERROR( "Invalid destination offset\n" );
1538 return DRM_ERR( EINVAL );
1539 }
1540
1541 dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
1542
1543 /* Flush the pixel cache. This ensures no pixel data gets mixed
1544 * up with the texture data from the host data blit, otherwise
1545 * part of the texture image may be corrupted.
1546 */
1547 BEGIN_RING( 4 );
1548 RADEON_FLUSH_CACHE();
1549 RADEON_WAIT_UNTIL_IDLE();
1550 ADVANCE_RING();
1551
1552 /* The compiler won't optimize away a division by a variable,
1553 * even if the only legal values are powers of two. Thus, we'll
1554 * use a shift instead.
1555 */
1556 switch ( tex->format ) {
1557 case RADEON_TXFORMAT_ARGB8888:
1558 case RADEON_TXFORMAT_RGBA8888:
1559 format = RADEON_COLOR_FORMAT_ARGB8888;
1560 tex_width = tex->width * 4;
1561 blit_width = image->width * 4;
1562 break;
1563 case RADEON_TXFORMAT_AI88:
1564 case RADEON_TXFORMAT_ARGB1555:
1565 case RADEON_TXFORMAT_RGB565:
1566 case RADEON_TXFORMAT_ARGB4444:
1567 case RADEON_TXFORMAT_VYUY422:
1568 case RADEON_TXFORMAT_YVYU422:
1569 format = RADEON_COLOR_FORMAT_RGB565;
1570 tex_width = tex->width * 2;
1571 blit_width = image->width * 2;
1572 break;
1573 case RADEON_TXFORMAT_I8:
1574 case RADEON_TXFORMAT_RGB332:
1575 format = RADEON_COLOR_FORMAT_CI8;
1576 tex_width = tex->width * 1;
1577 blit_width = image->width * 1;
1578 break;
1579 default:
1580 DRM_ERROR( "invalid texture format %d\n", tex->format );
1581 return DRM_ERR(EINVAL);
1582 }
1583 spitch = blit_width >> 6;
1584 if (spitch == 0 && image->height > 1)
1585 return DRM_ERR(EINVAL);
1586
1587 texpitch = tex->pitch;
1588 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
1589 microtile = 1;
1590 if (tex_width < 64) {
1591 texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
1592 /* we got tiled coordinates, untile them */
1593 image->x *= 2;
1594 }
1595 }
1596 else microtile = 0;
1597
1598 DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
1599
1600 do {
1601 DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
1602 tex->offset >> 10, tex->pitch, tex->format,
1603 image->x, image->y, image->width, image->height );
1604
1605 /* Make a copy of some parameters in case we have to
1606 * update them for a multi-pass texture blit.
1607 */
1608 height = image->height;
1609 data = (const u8 __user *)image->data;
1610
1611 size = height * blit_width;
1612
1613 if ( size > RADEON_MAX_TEXTURE_SIZE ) {
1614 height = RADEON_MAX_TEXTURE_SIZE / blit_width;
1615 size = height * blit_width;
1616 } else if ( size < 4 && size > 0 ) {
1617 size = 4;
1618 } else if ( size == 0 ) {
1619 return 0;
1620 }
1621
1622 buf = radeon_freelist_get( dev );
1623 if ( 0 && !buf ) {
1624 radeon_do_cp_idle( dev_priv );
1625 buf = radeon_freelist_get( dev );
1626 }
1627 if ( !buf ) {
1628 DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
1629 if (DRM_COPY_TO_USER( tex->image, image, sizeof(*image) ))
1630 return DRM_ERR(EFAULT);
1631 return DRM_ERR(EAGAIN);
1632 }
1633
1634
1635 /* Dispatch the indirect buffer.
1636 */
1637 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
1638 dwords = size / 4;
1639
1640 if (microtile) {
1641 /* texture micro tiling in use, minimum texture width is thus 16 bytes.
1642 however, we cannot use blitter directly for texture width < 64 bytes,
1643 since minimum tex pitch is 64 bytes and we need this to match
1644 the texture width, otherwise the blitter will tile it wrong.
1645 Thus, tiling manually in this case. Additionally, need to special
1646 case tex height = 1, since our actual image will have height 2
1647 and we need to ensure we don't read beyond the texture size
1648 from user space. */
1649 if (tex->height == 1) {
1650 if (tex_width >= 64 || tex_width <= 16) {
1651 if (DRM_COPY_FROM_USER(buffer, data,
1652 tex_width * sizeof(u32))) {
1653 DRM_ERROR("EFAULT on pad, %d bytes\n",
1654 tex_width);
1655 return DRM_ERR(EFAULT);
1656 }
1657 } else if (tex_width == 32) {
1658 if (DRM_COPY_FROM_USER(buffer, data, 16)) {
1659 DRM_ERROR("EFAULT on pad, %d bytes\n",
1660 tex_width);
1661 return DRM_ERR(EFAULT);
1662 }
1663 if (DRM_COPY_FROM_USER(buffer + 8, data + 16, 16)) {
1664 DRM_ERROR("EFAULT on pad, %d bytes\n",
1665 tex_width);
1666 return DRM_ERR(EFAULT);
1667 }
1668 }
1669 } else if (tex_width >= 64 || tex_width == 16) {
1670 if (DRM_COPY_FROM_USER(buffer, data,
1671 dwords * sizeof(u32))) {
1672 DRM_ERROR("EFAULT on data, %d dwords\n",
1673 dwords);
1674 return DRM_ERR(EFAULT);
1675 }
1676 } else if (tex_width < 16) {
1677 for (i = 0; i < tex->height; i++) {
1678 if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
1679 DRM_ERROR("EFAULT on pad, %d bytes\n",
1680 tex_width);
1681 return DRM_ERR(EFAULT);
1682 }
1683 buffer += 4;
1684 data += tex_width;
1685 }
1686 } else if (tex_width == 32) {
1687 /* TODO: make sure this works when not fitting in one buffer
1688 (i.e. 32bytes x 2048...) */
1689 for (i = 0; i < tex->height; i += 2) {
1690 if (DRM_COPY_FROM_USER(buffer, data, 16)) {
1691 DRM_ERROR("EFAULT on pad, %d bytes\n",
1692 tex_width);
1693 return DRM_ERR(EFAULT);
1694 }
1695 data += 16;
1696 if (DRM_COPY_FROM_USER(buffer + 8, data, 16)) {
1697 DRM_ERROR("EFAULT on pad, %d bytes\n",
1698 tex_width);
1699 return DRM_ERR(EFAULT);
1700 }
1701 data += 16;
1702 if (DRM_COPY_FROM_USER(buffer + 4, data, 16)) {
1703 DRM_ERROR("EFAULT on pad, %d bytes\n",
1704 tex_width);
1705 return DRM_ERR(EFAULT);
1706 }
1707 data += 16;
1708 if (DRM_COPY_FROM_USER(buffer + 12, data, 16)) {
1709 DRM_ERROR("EFAULT on pad, %d bytes\n",
1710 tex_width);
1711 return DRM_ERR(EFAULT);
1712 }
1713 data += 16;
1714 buffer += 16;
1715 }
1716 }
1717 }
1718 else {
1719 if (tex_width >= 32) {
1720 /* Texture image width is larger than the minimum, so we
1721 * can upload it directly.
1722 */
1723 if (DRM_COPY_FROM_USER(buffer, data,
1724 dwords * sizeof(u32))) {
1725 DRM_ERROR("EFAULT on data, %d dwords\n",
1726 dwords);
1727 return DRM_ERR(EFAULT);
1728 }
1729 } else {
1730 /* Texture image width is less than the minimum, so we
1731 * need to pad out each image scanline to the minimum
1732 * width.
1733 */
1734 for (i = 0 ; i < tex->height ; i++) {
1735 if (DRM_COPY_FROM_USER(buffer, data, tex_width )) {
1736 DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
1737 return DRM_ERR(EFAULT);
1738 }
1739 buffer += 8;
1740 data += tex_width;
1741 }
1742 }
1743 }
1744
1745 buf->filp = filp;
1746 buf->used = size;
1747 offset = dev_priv->gart_buffers_offset + buf->offset;
1748 BEGIN_RING(9);
1749 OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
1750 OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
1751 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1752 RADEON_GMC_BRUSH_NONE |
1753 (format << 8) |
1754 RADEON_GMC_SRC_DATATYPE_COLOR |
1755 RADEON_ROP3_S |
1756 RADEON_DP_SRC_SOURCE_MEMORY |
1757 RADEON_GMC_CLR_CMP_CNTL_DIS |
1758 RADEON_GMC_WR_MSK_DIS );
1759 OUT_RING((spitch << 22) | (offset >> 10));
1760 OUT_RING((texpitch << 22) | (tex->offset >> 10));
1761 OUT_RING(0);
1762 OUT_RING((image->x << 16) | image->y);
1763 OUT_RING((image->width << 16) | height);
1764 RADEON_WAIT_UNTIL_2D_IDLE();
1765 ADVANCE_RING();
1766
1767 radeon_cp_discard_buffer(dev, buf);
1768
1769 /* Update the input parameters for next time */
1770 image->y += height;
1771 image->height -= height;
1772 image->data = (const u8 __user *)image->data + size;
1773 } while (image->height > 0);
1774
1775 /* Flush the pixel cache after the blit completes. This ensures
1776 * the texture data is written out to memory before rendering
1777 * continues.
1778 */
1779 BEGIN_RING( 4 );
1780 RADEON_FLUSH_CACHE();
1781 RADEON_WAIT_UNTIL_2D_IDLE();
1782 ADVANCE_RING();
1783 return 0;
1784 }
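/* Illustrative userspace sketch (not driver code, hence #if 0): one way
 * a client could drive the texture-upload path above via the TEXTURE
 * ioctl handled later in this file. The kernel advances image.y,
 * image.height and image.data itself, blitting in ring-sized passes.
 * upload_rgba() and the 64-byte-unit pitch are assumptions for
 * illustration; the real clients live in the DDX and Mesa.
 */
#if 0
#include <xf86drm.h>
#include "radeon_drm.h"

static int upload_rgba(int fd, unsigned int gpu_offset,
                       const void *pixels, int width, int height)
{
	drm_radeon_tex_image_t image;
	drm_radeon_texture_t tex;

	image.x = 0;
	image.y = 0;
	image.width = width;
	image.height = height;
	image.data = pixels;

	tex.offset = gpu_offset;
	tex.pitch = (width * 4) / 64;	/* assumed: pitch in 64-byte units */
	tex.format = RADEON_TXFORMAT_ARGB8888;
	tex.width = width;
	tex.height = height;
	tex.image = &image;

	/* WriteRead, since the kernel may update the passed structs */
	return drmCommandWriteRead(fd, DRM_RADEON_TEXTURE,
				   &tex, sizeof(tex));
}
#endif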
1785
1786
1787 static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
1788 {
1789 drm_radeon_private_t *dev_priv = dev->dev_private;
1790 int i;
1791 RING_LOCALS;
1792 DRM_DEBUG( "\n" );
1793
1794 BEGIN_RING( 35 );
1795
1796 OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
1797 OUT_RING( 0x00000000 );
1798
1799 OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
1800 for ( i = 0 ; i < 32 ; i++ ) {
1801 OUT_RING( stipple[i] );
1802 }
1803
1804 ADVANCE_RING();
1805 }
1806
1807 static void radeon_apply_surface_regs(int surf_index, drm_radeon_private_t *dev_priv)
1808 {
1809 if (!dev_priv->mmio)
1810 return;
1811
1812 radeon_do_cp_idle(dev_priv);
1813
1814 RADEON_WRITE(RADEON_SURFACE0_INFO + 16*surf_index,
1815 dev_priv->surfaces[surf_index].flags);
1816 RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*surf_index,
1817 dev_priv->surfaces[surf_index].lower);
1818 RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*surf_index,
1819 dev_priv->surfaces[surf_index].upper);
1820 }
1821
1822
1823 /* Allocates a virtual surface.
1824 * This doesn't always allocate a real surface; it will stretch an
1825 * existing surface when possible.
1826 *
1827 * Note that the refcount can be at most 2: if it could reach 3, a
1828 * free might force us to allocate a new surface, which might not
1829 * always be available.
1830 * For example: we allocate three contiguous surfaces ABC. If B is
1831 * freed, we suddenly need two surfaces to store A and C, which might
1832 * not always be available.
1833 */
1834 static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *dev_priv, DRMFILE filp)
1835 {
1836 struct radeon_virt_surface *s;
1837 int i;
1838 int virt_surface_index;
1839 uint32_t new_upper, new_lower;
1840
1841 new_lower = new->address;
1842 new_upper = new_lower + new->size - 1;
1843
1844 /* sanity check */
1845 if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
1846 ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) != RADEON_SURF_ADDRESS_FIXED_MASK) ||
1847 ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
1848 return -1;
1849
1850 /* make sure there is no overlap with existing surfaces */
1851 for (i = 0; i < RADEON_MAX_SURFACES; i++) {
1852 if ((dev_priv->surfaces[i].refcount != 0) &&
1853 (( (new_lower >= dev_priv->surfaces[i].lower) &&
1854 (new_lower < dev_priv->surfaces[i].upper) ) ||
1855 ( (new_lower < dev_priv->surfaces[i].lower) &&
1856 (new_upper > dev_priv->surfaces[i].lower) )) )
1857 return -1;
1858 }
1859
1860 /* find a virtual surface */
1861 for (i = 0; i < 2*RADEON_MAX_SURFACES; i++)
1862 if (dev_priv->virt_surfaces[i].filp == NULL)
1863 break;
1864 if (i == 2*RADEON_MAX_SURFACES)
1865 return -1;
1866 virt_surface_index = i;
1867
1868 /* try to reuse an existing surface */
1869 for (i = 0; i < RADEON_MAX_SURFACES; i++) {
1870 /* extend before */
1871 if ((dev_priv->surfaces[i].refcount == 1) &&
1872 (new->flags == dev_priv->surfaces[i].flags) &&
1873 (new_upper + 1 == dev_priv->surfaces[i].lower)) {
1874 s = &(dev_priv->virt_surfaces[virt_surface_index]);
1875 s->surface_index = i;
1876 s->lower = new_lower;
1877 s->upper = new_upper;
1878 s->flags = new->flags;
1879 s->filp = filp;
1880 dev_priv->surfaces[i].refcount++;
1881 dev_priv->surfaces[i].lower = s->lower;
1882 radeon_apply_surface_regs(s->surface_index, dev_priv);
1883 return virt_surface_index;
1884 }
1885
1886 /* extend after */
1887 if ((dev_priv->surfaces[i].refcount == 1) &&
1888 (new->flags == dev_priv->surfaces[i].flags) &&
1889 (new_lower == dev_priv->surfaces[i].upper + 1)) {
1890 s = &(dev_priv->virt_surfaces[virt_surface_index]);
1891 s->surface_index = i;
1892 s->lower = new_lower;
1893 s->upper = new_upper;
1894 s->flags = new->flags;
1895 s->filp = filp;
1896 dev_priv->surfaces[i].refcount++;
1897 dev_priv->surfaces[i].upper = s->upper;
1898 radeon_apply_surface_regs(s->surface_index, dev_priv);
1899 return virt_surface_index;
1900 }
1901 }
1902
1903 /* okay, we need a new one */
1904 for (i = 0; i < RADEON_MAX_SURFACES; i++) {
1905 if (dev_priv->surfaces[i].refcount == 0) {
1906 s = &(dev_priv->virt_surfaces[virt_surface_index]);
1907 s->surface_index = i;
1908 s->lower = new_lower;
1909 s->upper = new_upper;
1910 s->flags = new->flags;
1911 s->filp = filp;
1912 dev_priv->surfaces[i].refcount = 1;
1913 dev_priv->surfaces[i].lower = s->lower;
1914 dev_priv->surfaces[i].upper = s->upper;
1915 dev_priv->surfaces[i].flags = s->flags;
1916 radeon_apply_surface_regs(s->surface_index, dev_priv);
1917 return virt_surface_index;
1918 }
1919 }
1920
1921 /* we didn't find anything */
1922 return -1;
1923 }
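/* Worked example of the reuse paths above: suppose one real surface
 * spans [0x000000, 0x0fffff] with refcount 1, and a request arrives for
 * [0x100000, 0x1fffff] with identical flags. That hits the "extend
 * after" case: the real surface grows to [0x000000, 0x1fffff], its
 * refcount becomes 2, and only the surface registers are rewritten; no
 * extra hardware surface is consumed.
 */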
1924
1925 static int free_surface(DRMFILE filp, drm_radeon_private_t *dev_priv, int lower)
1926 {
1927 struct radeon_virt_surface *s;
1928 int i;
1929 /* find the virtual surface */
1930 for (i = 0; i < 2*RADEON_MAX_SURFACES; i++) {
1931 s = &(dev_priv->virt_surfaces[i]);
1932 if (s->filp) {
1933 if ((lower == s->lower) && (filp == s->filp)) {
1934 if (dev_priv->surfaces[s->surface_index].lower == s->lower)
1935 dev_priv->surfaces[s->surface_index].lower = s->upper;
1936
1937 if (dev_priv->surfaces[s->surface_index].upper == s->upper)
1938 dev_priv->surfaces[s->surface_index].upper = s->lower;
1939
1940 dev_priv->surfaces[s->surface_index].refcount--;
1941 if (dev_priv->surfaces[s->surface_index].refcount == 0)
1942 dev_priv->surfaces[s->surface_index].flags = 0;
1943 s->filp = NULL;
1944 radeon_apply_surface_regs(s->surface_index, dev_priv);
1945 return 0;
1946 }
1947 }
1948 }
1949 return 1;
1950 }
1951
1952 static void radeon_surfaces_release(DRMFILE filp, drm_radeon_private_t *dev_priv)
1953 {
1954 int i;
1955 for (i = 0; i < 2*RADEON_MAX_SURFACES; i++)
1956 {
1957 if (dev_priv->virt_surfaces[i].filp == filp)
1958 free_surface(filp, dev_priv, dev_priv->virt_surfaces[i].lower);
1959 }
1960 }
1961
1962 /* ================================================================
1963 * IOCTL functions
1964 */
1965 static int radeon_surface_alloc(DRM_IOCTL_ARGS)
1966 {
1967 DRM_DEVICE;
1968 drm_radeon_private_t *dev_priv = dev->dev_private;
1969 drm_radeon_surface_alloc_t alloc;
1970
1971 if (!dev_priv) {
1972 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1973 return DRM_ERR(EINVAL);
1974 }
1975
1976 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_surface_alloc_t __user *)data,
1977 sizeof(alloc));
1978
1979 if (alloc_surface(&alloc, dev_priv, filp) == -1)
1980 return DRM_ERR(EINVAL);
1981 else
1982 return 0;
1983 }
1984
1985 static int radeon_surface_free(DRM_IOCTL_ARGS)
1986 {
1987 DRM_DEVICE;
1988 drm_radeon_private_t *dev_priv = dev->dev_private;
1989 drm_radeon_surface_free_t memfree;
1990
1991 if (!dev_priv) {
1992 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1993 return DRM_ERR(EINVAL);
1994 }
1995
1996 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *)data,
1997 sizeof(memfree) );
1998
1999 if (free_surface(filp, dev_priv, memfree.address))
2000 return DRM_ERR(EINVAL);
2001 else
2002 return 0;
2003 }
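/* Illustrative userspace sketch (not driver code, hence #if 0): pairing
 * the two surface ioctls above. alloc_fb_surface(), free_fb_surface()
 * and the surf_flags parameter are assumptions for illustration; the
 * flags are the raw hardware bits written to the SURFACE*_INFO
 * registers.
 */
#if 0
#include <xf86drm.h>
#include "radeon_drm.h"

static int alloc_fb_surface(int fd, unsigned int address,
                            unsigned int size, unsigned int surf_flags)
{
	drm_radeon_surface_alloc_t alloc;

	alloc.address = address;	/* must honour the FIXED_MASK alignment */
	alloc.size = size;
	alloc.flags = surf_flags;

	return drmCommandWrite(fd, DRM_RADEON_SURF_ALLOC,
			       &alloc, sizeof(alloc));
}

static int free_fb_surface(int fd, unsigned int address)
{
	drm_radeon_surface_free_t memfree;

	/* the lower bound doubles as the lookup key in free_surface() */
	memfree.address = address;
	return drmCommandWrite(fd, DRM_RADEON_SURF_FREE,
			       &memfree, sizeof(memfree));
}
#endif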
2004
2005 static int radeon_cp_clear( DRM_IOCTL_ARGS )
2006 {
2007 DRM_DEVICE;
2008 drm_radeon_private_t *dev_priv = dev->dev_private;
2009 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2010 drm_radeon_clear_t clear;
2011 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
2012 DRM_DEBUG( "\n" );
2013
2014 LOCK_TEST_WITH_RETURN( dev, filp );
2015
2016 DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t __user *)data,
2017 sizeof(clear) );
2018
2019 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2020
2021 if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
2022 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
2023
2024 if ( DRM_COPY_FROM_USER( &depth_boxes, clear.depth_boxes,
2025 sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
2026 return DRM_ERR(EFAULT);
2027
2028 radeon_cp_dispatch_clear( dev, &clear, depth_boxes );
2029
2030 COMMIT_RING();
2031 return 0;
2032 }
2033
2034
2035 /* Page flipping is set up lazily, on the first flip ioctl; it is not
2036 * clear why it isn't enabled all the time: */
2037 static int radeon_do_init_pageflip( drm_device_t *dev )
2038 {
2039 drm_radeon_private_t *dev_priv = dev->dev_private;
2040 RING_LOCALS;
2041
2042 DRM_DEBUG( "\n" );
2043
2044 BEGIN_RING( 6 );
2045 RADEON_WAIT_UNTIL_3D_IDLE();
2046 OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) );
2047 OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
2048 OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) );
2049 OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
2050 ADVANCE_RING();
2051
2052 dev_priv->page_flipping = 1;
2053 dev_priv->current_page = 0;
2054 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
2055
2056 return 0;
2057 }
2058
2059 /* Called whenever a client dies, from drm_release.
2060 * NOTE: Lock isn't necessarily held when this is called!
2061 */
2062 static int radeon_do_cleanup_pageflip( drm_device_t *dev )
2063 {
2064 drm_radeon_private_t *dev_priv = dev->dev_private;
2065 DRM_DEBUG( "\n" );
2066
2067 if (dev_priv->current_page != 0)
2068 radeon_cp_dispatch_flip( dev );
2069
2070 dev_priv->page_flipping = 0;
2071 return 0;
2072 }
2073
2074 /* Swapping and flipping are different operations and need different
2075 * ioctls. They can and should be intermixed to support multiple 3D windows.
2076 */
2077 static int radeon_cp_flip( DRM_IOCTL_ARGS )
2078 {
2079 DRM_DEVICE;
2080 drm_radeon_private_t *dev_priv = dev->dev_private;
2081 DRM_DEBUG( "\n" );
2082
2083 LOCK_TEST_WITH_RETURN( dev, filp );
2084
2085 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2086
2087 if (!dev_priv->page_flipping)
2088 radeon_do_init_pageflip( dev );
2089
2090 radeon_cp_dispatch_flip( dev );
2091
2092 COMMIT_RING();
2093 return 0;
2094 }
2095
2096 static int radeon_cp_swap( DRM_IOCTL_ARGS )
2097 {
2098 DRM_DEVICE;
2099 drm_radeon_private_t *dev_priv = dev->dev_private;
2100 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2101 DRM_DEBUG( "\n" );
2102
2103 LOCK_TEST_WITH_RETURN( dev, filp );
2104
2105 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2106
2107 if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
2108 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
2109
2110 radeon_cp_dispatch_swap( dev );
2111 dev_priv->sarea_priv->ctx_owner = 0;
2112
2113 COMMIT_RING();
2114 return 0;
2115 }
2116
2117 static int radeon_cp_vertex( DRM_IOCTL_ARGS )
2118 {
2119 DRM_DEVICE;
2120 drm_radeon_private_t *dev_priv = dev->dev_private;
2121 drm_file_t *filp_priv;
2122 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2123 drm_device_dma_t *dma = dev->dma;
2124 drm_buf_t *buf;
2125 drm_radeon_vertex_t vertex;
2126 drm_radeon_tcl_prim_t prim;
2127
2128 LOCK_TEST_WITH_RETURN( dev, filp );
2129
2130 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
2131
2132 DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t __user *)data,
2133 sizeof(vertex) );
2134
2135 DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
2136 DRM_CURRENTPID,
2137 vertex.idx, vertex.count, vertex.discard );
2138
2139 if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
2140 DRM_ERROR( "buffer index %d (of %d max)\n",
2141 vertex.idx, dma->buf_count - 1 );
2142 return DRM_ERR(EINVAL);
2143 }
2144 if ( vertex.prim < 0 ||
2145 vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
2146 DRM_ERROR( "buffer prim %d\n", vertex.prim );
2147 return DRM_ERR(EINVAL);
2148 }
2149
2150 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2151 VB_AGE_TEST_WITH_RETURN( dev_priv );
2152
2153 buf = dma->buflist[vertex.idx];
2154
2155 if ( buf->filp != filp ) {
2156 DRM_ERROR( "process %d using buffer owned by %p\n",
2157 DRM_CURRENTPID, buf->filp );
2158 return DRM_ERR(EINVAL);
2159 }
2160 if ( buf->pending ) {
2161 DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
2162 return DRM_ERR(EINVAL);
2163 }
2164
2165 /* Build up a prim_t record:
2166 */
2167 if (vertex.count) {
2168 buf->used = vertex.count; /* not used? */
2169
2170 if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
2171 if ( radeon_emit_state( dev_priv, filp_priv,
2172 &sarea_priv->context_state,
2173 sarea_priv->tex_state,
2174 sarea_priv->dirty ) ) {
2175 DRM_ERROR( "radeon_emit_state failed\n" );
2176 return DRM_ERR( EINVAL );
2177 }
2178
2179 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
2180 RADEON_UPLOAD_TEX1IMAGES |
2181 RADEON_UPLOAD_TEX2IMAGES |
2182 RADEON_REQUIRE_QUIESCENCE);
2183 }
2184
2185 prim.start = 0;
2186 prim.finish = vertex.count; /* unused */
2187 prim.prim = vertex.prim;
2188 prim.numverts = vertex.count;
2189 prim.vc_format = dev_priv->sarea_priv->vc_format;
2190
2191 radeon_cp_dispatch_vertex( dev, buf, &prim );
2192 }
2193
2194 if (vertex.discard) {
2195 radeon_cp_discard_buffer( dev, buf );
2196 }
2197
2198 COMMIT_RING();
2199 return 0;
2200 }
2201
2202 static int radeon_cp_indices( DRM_IOCTL_ARGS )
2203 {
2204 DRM_DEVICE;
2205 drm_radeon_private_t *dev_priv = dev->dev_private;
2206 drm_file_t *filp_priv;
2207 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2208 drm_device_dma_t *dma = dev->dma;
2209 drm_buf_t *buf;
2210 drm_radeon_indices_t elts;
2211 drm_radeon_tcl_prim_t prim;
2212 int count;
2213
2214 LOCK_TEST_WITH_RETURN( dev, filp );
2215
2216 if ( !dev_priv ) {
2217 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
2218 return DRM_ERR(EINVAL);
2219 }
2220
2221 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
2222
2223 DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t __user *)data,
2224 sizeof(elts) );
2225
2226 DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
2227 DRM_CURRENTPID,
2228 elts.idx, elts.start, elts.end, elts.discard );
2229
2230 if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
2231 DRM_ERROR( "buffer index %d (of %d max)\n",
2232 elts.idx, dma->buf_count - 1 );
2233 return DRM_ERR(EINVAL);
2234 }
2235 if ( elts.prim < 0 ||
2236 elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
2237 DRM_ERROR( "buffer prim %d\n", elts.prim );
2238 return DRM_ERR(EINVAL);
2239 }
2240
2241 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2242 VB_AGE_TEST_WITH_RETURN( dev_priv );
2243
2244 buf = dma->buflist[elts.idx];
2245
2246 if ( buf->filp != filp ) {
2247 DRM_ERROR( "process %d using buffer owned by %p\n",
2248 DRM_CURRENTPID, buf->filp );
2249 return DRM_ERR(EINVAL);
2250 }
2251 if ( buf->pending ) {
2252 DRM_ERROR( "sending pending buffer %d\n", elts.idx );
2253 return DRM_ERR(EINVAL);
2254 }
2255
2256 count = (elts.end - elts.start) / sizeof(u16);
2257 elts.start -= RADEON_INDEX_PRIM_OFFSET;
2258
2259 if ( elts.start & 0x7 ) {
2260 DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
2261 return DRM_ERR(EINVAL);
2262 }
2263 if ( elts.start < buf->used ) {
2264 DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
2265 return DRM_ERR(EINVAL);
2266 }
2267
2268 buf->used = elts.end;
2269
2270 if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
2271 if ( radeon_emit_state( dev_priv, filp_priv,
2272 &sarea_priv->context_state,
2273 sarea_priv->tex_state,
2274 sarea_priv->dirty ) ) {
2275 DRM_ERROR( "radeon_emit_state failed\n" );
2276 return DRM_ERR( EINVAL );
2277 }
2278
2279 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
2280 RADEON_UPLOAD_TEX1IMAGES |
2281 RADEON_UPLOAD_TEX2IMAGES |
2282 RADEON_REQUIRE_QUIESCENCE);
2283 }
2284
2285
2286 /* Build up a prim_t record:
2287 */
2288 prim.start = elts.start;
2289 prim.finish = elts.end;
2290 prim.prim = elts.prim;
2291 prim.offset = 0; /* offset from start of dma buffers */
2292 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2293 prim.vc_format = dev_priv->sarea_priv->vc_format;
2294
2295 radeon_cp_dispatch_indices( dev, buf, &prim );
2296 if (elts.discard) {
2297 radeon_cp_discard_buffer( dev, buf );
2298 }
2299
2300 COMMIT_RING();
2301 return 0;
2302 }
2303
2304 static int radeon_cp_texture( DRM_IOCTL_ARGS )
2305 {
2306 DRM_DEVICE;
2307 drm_radeon_private_t *dev_priv = dev->dev_private;
2308 drm_radeon_texture_t tex;
2309 drm_radeon_tex_image_t image;
2310 int ret;
2311
2312 LOCK_TEST_WITH_RETURN( dev, filp );
2313
2314 DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t __user *)data, sizeof(tex) );
2315
2316 if ( tex.image == NULL ) {
2317 DRM_ERROR( "null texture image!\n" );
2318 return DRM_ERR(EINVAL);
2319 }
2320
2321 if ( DRM_COPY_FROM_USER( &image,
2322 (drm_radeon_tex_image_t __user *)tex.image,
2323 sizeof(image) ) )
2324 return DRM_ERR(EFAULT);
2325
2326 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2327 VB_AGE_TEST_WITH_RETURN( dev_priv );
2328
2329 ret = radeon_cp_dispatch_texture( filp, dev, &tex, &image );
2330
2331 COMMIT_RING();
2332 return ret;
2333 }
2334
2335 static int radeon_cp_stipple( DRM_IOCTL_ARGS )
2336 {
2337 DRM_DEVICE;
2338 drm_radeon_private_t *dev_priv = dev->dev_private;
2339 drm_radeon_stipple_t stipple;
2340 u32 mask[32];
2341
2342 LOCK_TEST_WITH_RETURN( dev, filp );
2343
2344 DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t __user *)data,
2345 sizeof(stipple) );
2346
2347 if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
2348 return DRM_ERR(EFAULT);
2349
2350 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2351
2352 radeon_cp_dispatch_stipple( dev, mask );
2353
2354 COMMIT_RING();
2355 return 0;
2356 }
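/* Illustrative userspace sketch (not driver code, hence #if 0): the
 * STIPPLE ioctl above expects exactly 32 u32 words, one per row of the
 * 32x32 pattern. set_checkerboard_stipple() is an assumption for
 * illustration.
 */
#if 0
#include <xf86drm.h>
#include "radeon_drm.h"

static int set_checkerboard_stipple(int fd)
{
	unsigned int pattern[32];
	drm_radeon_stipple_t stipple;
	int i;

	/* alternating bit rows give a 1x1 checkerboard */
	for (i = 0; i < 32; i++)
		pattern[i] = (i & 1) ? 0x55555555 : 0xaaaaaaaa;

	stipple.mask = pattern;
	return drmCommandWrite(fd, DRM_RADEON_STIPPLE,
			       &stipple, sizeof(stipple));
}
#endif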
2357
2358 static int radeon_cp_indirect( DRM_IOCTL_ARGS )
2359 {
2360 DRM_DEVICE;
2361 drm_radeon_private_t *dev_priv = dev->dev_private;
2362 drm_device_dma_t *dma = dev->dma;
2363 drm_buf_t *buf;
2364 drm_radeon_indirect_t indirect;
2365 RING_LOCALS;
2366
2367 LOCK_TEST_WITH_RETURN( dev, filp );
2368
2369 if ( !dev_priv ) {
2370 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
2371 return DRM_ERR(EINVAL);
2372 }
2373
2374 DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t __user *)data,
2375 sizeof(indirect) );
2376
2377 DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
2378 indirect.idx, indirect.start,
2379 indirect.end, indirect.discard );
2380
2381 if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
2382 DRM_ERROR( "buffer index %d (of %d max)\n",
2383 indirect.idx, dma->buf_count - 1 );
2384 return DRM_ERR(EINVAL);
2385 }
2386
2387 buf = dma->buflist[indirect.idx];
2388
2389 if ( buf->filp != filp ) {
2390 DRM_ERROR( "process %d using buffer owned by %p\n",
2391 DRM_CURRENTPID, buf->filp );
2392 return DRM_ERR(EINVAL);
2393 }
2394 if ( buf->pending ) {
2395 DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
2396 return DRM_ERR(EINVAL);
2397 }
2398
2399 if ( indirect.start < buf->used ) {
2400 DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
2401 indirect.start, buf->used );
2402 return DRM_ERR(EINVAL);
2403 }
2404
2405 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2406 VB_AGE_TEST_WITH_RETURN( dev_priv );
2407
2408 buf->used = indirect.end;
2409
2410 /* Wait for the 3D stream to idle before the indirect buffer
2411 * containing 2D acceleration commands is processed.
2412 */
2413 BEGIN_RING( 2 );
2414
2415 RADEON_WAIT_UNTIL_3D_IDLE();
2416
2417 ADVANCE_RING();
2418
2419 /* Dispatch the indirect buffer full of commands from the
2420 * X server. This is insecure and is thus only available to
2421 * privileged clients.
2422 */
2423 radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
2424 if (indirect.discard) {
2425 radeon_cp_discard_buffer( dev, buf );
2426 }
2427
2428
2429 COMMIT_RING();
2430 return 0;
2431 }
2432
2433 static int radeon_cp_vertex2( DRM_IOCTL_ARGS )
2434 {
2435 DRM_DEVICE;
2436 drm_radeon_private_t *dev_priv = dev->dev_private;
2437 drm_file_t *filp_priv;
2438 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2439 drm_device_dma_t *dma = dev->dma;
2440 drm_buf_t *buf;
2441 drm_radeon_vertex2_t vertex;
2442 int i;
2443 unsigned char laststate;
2444
2445 LOCK_TEST_WITH_RETURN( dev, filp );
2446
2447 if ( !dev_priv ) {
2448 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
2449 return DRM_ERR(EINVAL);
2450 }
2451
2452 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
2453
2454 DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t __user *)data,
2455 sizeof(vertex) );
2456
2457 DRM_DEBUG( "pid=%d index=%d discard=%d\n",
2458 DRM_CURRENTPID,
2459 vertex.idx, vertex.discard );
2460
2461 if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
2462 DRM_ERROR( "buffer index %d (of %d max)\n",
2463 vertex.idx, dma->buf_count - 1 );
2464 return DRM_ERR(EINVAL);
2465 }
2466
2467 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2468 VB_AGE_TEST_WITH_RETURN( dev_priv );
2469
2470 buf = dma->buflist[vertex.idx];
2471
2472 if ( buf->filp != filp ) {
2473 DRM_ERROR( "process %d using buffer owned by %p\n",
2474 DRM_CURRENTPID, buf->filp );
2475 return DRM_ERR(EINVAL);
2476 }
2477
2478 if ( buf->pending ) {
2479 DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
2480 return DRM_ERR(EINVAL);
2481 }
2482
2483 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
2484 return DRM_ERR(EINVAL);
2485
2486 for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) {
2487 drm_radeon_prim_t prim;
2488 drm_radeon_tcl_prim_t tclprim;
2489
2490 if ( DRM_COPY_FROM_USER( &prim, &vertex.prim[i], sizeof(prim) ) )
2491 return DRM_ERR(EFAULT);
2492
2493 if ( prim.stateidx != laststate ) {
2494 drm_radeon_state_t state;
2495
2496 if ( DRM_COPY_FROM_USER( &state,
2497 &vertex.state[prim.stateidx],
2498 sizeof(state) ) )
2499 return DRM_ERR(EFAULT);
2500
2501 if ( radeon_emit_state2( dev_priv, filp_priv, &state ) ) {
2502 DRM_ERROR( "radeon_emit_state2 failed\n" );
2503 return DRM_ERR( EINVAL );
2504 }
2505
2506 laststate = prim.stateidx;
2507 }
2508
2509 tclprim.start = prim.start;
2510 tclprim.finish = prim.finish;
2511 tclprim.prim = prim.prim;
2512 tclprim.vc_format = prim.vc_format;
2513
2514 if ( prim.prim & RADEON_PRIM_WALK_IND ) {
2515 tclprim.offset = prim.numverts * 64;
2516 tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2517
2518 radeon_cp_dispatch_indices( dev, buf, &tclprim );
2519 } else {
2520 tclprim.numverts = prim.numverts;
2521 tclprim.offset = 0; /* not used */
2522
2523 radeon_cp_dispatch_vertex( dev, buf, &tclprim );
2524 }
2525
2526 if (sarea_priv->nbox == 1)
2527 sarea_priv->nbox = 0;
2528 }
2529
2530 if ( vertex.discard ) {
2531 radeon_cp_discard_buffer( dev, buf );
2532 }
2533
2534 COMMIT_RING();
2535 return 0;
2536 }
2537
2538
2539 static int radeon_emit_packets(
2540 drm_radeon_private_t *dev_priv,
2541 drm_file_t *filp_priv,
2542 drm_radeon_cmd_header_t header,
2543 drm_radeon_cmd_buffer_t *cmdbuf )
2544 {
2545 int id = (int)header.packet.packet_id;
2546 int sz, reg;
2547 int *data = (int *)cmdbuf->buf;
2548 RING_LOCALS;
2549
2550 if (id >= RADEON_MAX_STATE_PACKETS)
2551 return DRM_ERR(EINVAL);
2552
2553 sz = packet[id].len;
2554 reg = packet[id].start;
2555
2556 if (sz * sizeof(int) > cmdbuf->bufsz) {
2557 DRM_ERROR( "Packet size provided larger than data provided\n" );
2558 return DRM_ERR(EINVAL);
2559 }
2560
2561 if ( radeon_check_and_fixup_packets( dev_priv, filp_priv, id, data ) ) {
2562 DRM_ERROR( "Packet verification failed\n" );
2563 return DRM_ERR( EINVAL );
2564 }
2565
2566 BEGIN_RING(sz+1);
2567 OUT_RING( CP_PACKET0( reg, (sz-1) ) );
2568 OUT_RING_TABLE( data, sz );
2569 ADVANCE_RING();
2570
2571 cmdbuf->buf += sz * sizeof(int);
2572 cmdbuf->bufsz -= sz * sizeof(int);
2573 return 0;
2574 }
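/* Illustrative sketch (not driver code, hence #if 0): how a client
 * encodes one RADEON_CMD_PACKET in the stream parsed by
 * radeon_cp_cmdbuf() below. The kernel consumes exactly
 * packet[packet_id].len dwords of payload, so ndwords should match the
 * table entry. emit_state_packet() is an assumption for illustration.
 */
#if 0
#include <string.h>
#include "radeon_drm.h"

static char *emit_state_packet(char *p, int packet_id,
                               const unsigned int *payload, int ndwords)
{
	drm_radeon_cmd_header_t h;

	h.i = 0;
	h.packet.cmd_type = RADEON_CMD_PACKET;
	h.packet.packet_id = packet_id;

	memcpy(p, &h, sizeof(h));
	p += sizeof(h);
	memcpy(p, payload, ndwords * sizeof(unsigned int));
	return p + ndwords * sizeof(unsigned int);
}
#endif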
2575
2576 static __inline__ int radeon_emit_scalars(
2577 drm_radeon_private_t *dev_priv,
2578 drm_radeon_cmd_header_t header,
2579 drm_radeon_cmd_buffer_t *cmdbuf )
2580 {
2581 int sz = header.scalars.count;
2582 int start = header.scalars.offset;
2583 int stride = header.scalars.stride;
2584 RING_LOCALS;
2585
2586 BEGIN_RING( 3+sz );
2587 OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
2588 OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2589 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
2590 OUT_RING_TABLE( cmdbuf->buf, sz );
2591 ADVANCE_RING();
2592 cmdbuf->buf += sz * sizeof(int);
2593 cmdbuf->bufsz -= sz * sizeof(int);
2594 return 0;
2595 }
2596
2597 /* God this is ugly: the 8-bit scalars.offset in the header cannot
2598 * reach indices >= 256, so this variant rebases the start by 0x100. */
2599 static __inline__ int radeon_emit_scalars2(
2600 drm_radeon_private_t *dev_priv,
2601 drm_radeon_cmd_header_t header,
2602 drm_radeon_cmd_buffer_t *cmdbuf )
2603 {
2604 int sz = header.scalars.count;
2605 int start = ((unsigned int)header.scalars.offset) + 0x100;
2606 int stride = header.scalars.stride;
2607 RING_LOCALS;
2608
2609 BEGIN_RING( 3+sz );
2610 OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
2611 OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2612 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
2613 OUT_RING_TABLE( cmdbuf->buf, sz );
2614 ADVANCE_RING();
2615 cmdbuf->buf += sz * sizeof(int);
2616 cmdbuf->bufsz -= sz * sizeof(int);
2617 return 0;
2618 }
2619
2620 static __inline__ int radeon_emit_vectors(
2621 drm_radeon_private_t *dev_priv,
2622 drm_radeon_cmd_header_t header,
2623 drm_radeon_cmd_buffer_t *cmdbuf )
2624 {
2625 int sz = header.vectors.count;
2626 int start = header.vectors.offset;
2627 int stride = header.vectors.stride;
2628 RING_LOCALS;
2629
2630 BEGIN_RING( 3+sz );
2631 OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
2632 OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2633 OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
2634 OUT_RING_TABLE( cmdbuf->buf, sz );
2635 ADVANCE_RING();
2636
2637 cmdbuf->buf += sz * sizeof(int);
2638 cmdbuf->bufsz -= sz * sizeof(int);
2639 return 0;
2640 }
2641
2642
2643 static int radeon_emit_packet3( drm_device_t *dev,
2644 drm_file_t *filp_priv,
2645 drm_radeon_cmd_buffer_t *cmdbuf )
2646 {
2647 drm_radeon_private_t *dev_priv = dev->dev_private;
2648 unsigned int cmdsz;
2649 int ret;
2650 RING_LOCALS;
2651
2652 DRM_DEBUG("\n");
2653
2654 if ( ( ret = radeon_check_and_fixup_packet3( dev_priv, filp_priv,
2655 cmdbuf, &cmdsz ) ) ) {
2656 DRM_ERROR( "Packet verification failed\n" );
2657 return ret;
2658 }
2659
2660 BEGIN_RING( cmdsz );
2661 OUT_RING_TABLE( cmdbuf->buf, cmdsz );
2662 ADVANCE_RING();
2663
2664 cmdbuf->buf += cmdsz * 4;
2665 cmdbuf->bufsz -= cmdsz * 4;
2666 return 0;
2667 }
2668
2669
2670 static int radeon_emit_packet3_cliprect( drm_device_t *dev,
2671 drm_file_t *filp_priv,
2672 drm_radeon_cmd_buffer_t *cmdbuf,
2673 int orig_nbox )
2674 {
2675 drm_radeon_private_t *dev_priv = dev->dev_private;
2676 drm_clip_rect_t box;
2677 unsigned int cmdsz;
2678 int ret;
2679 drm_clip_rect_t __user *boxes = cmdbuf->boxes;
2680 int i = 0;
2681 RING_LOCALS;
2682
2683 DRM_DEBUG("\n");
2684
2685 if ( ( ret = radeon_check_and_fixup_packet3( dev_priv, filp_priv,
2686 cmdbuf, &cmdsz ) ) ) {
2687 DRM_ERROR( "Packet verification failed\n" );
2688 return ret;
2689 }
2690
2691 if (!orig_nbox)
2692 goto out;
2693
2694 do {
2695 if ( i < cmdbuf->nbox ) {
2696 if (DRM_COPY_FROM_USER( &box, &boxes[i], sizeof(box) ))
2697 return DRM_ERR(EFAULT);
2698 /* FIXME The second and subsequent times round
2699 * this loop, send a WAIT_UNTIL_3D_IDLE before
2700 * calling emit_clip_rect(). This fixes a
2701 * lockup on fast machines when sending
2702 * several cliprects with a cmdbuf, as when
2703 * waving a 2D window over a 3D
2704 * window. Something in the commands from user
2705 * space seems to hang the card when they're
2706 * sent several times in a row. That would be
2707 * the correct place to fix it but this works
2708 * around it until I can figure that out - Tim
2709 * Smith */
2710 if ( i ) {
2711 BEGIN_RING( 2 );
2712 RADEON_WAIT_UNTIL_3D_IDLE();
2713 ADVANCE_RING();
2714 }
2715 radeon_emit_clip_rect( dev_priv, &box );
2716 }
2717
2718 BEGIN_RING( cmdsz );
2719 OUT_RING_TABLE( cmdbuf->buf, cmdsz );
2720 ADVANCE_RING();
2721
2722 } while ( ++i < cmdbuf->nbox );
2723 if (cmdbuf->nbox == 1)
2724 cmdbuf->nbox = 0;
2725
2726 out:
2727 cmdbuf->buf += cmdsz * 4;
2728 cmdbuf->bufsz -= cmdsz * 4;
2729 return 0;
2730 }
2731
2732
2733 static int radeon_emit_wait( drm_device_t *dev, int flags )
2734 {
2735 drm_radeon_private_t *dev_priv = dev->dev_private;
2736 RING_LOCALS;
2737
2738 DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
2739 switch (flags) {
2740 case RADEON_WAIT_2D:
2741 BEGIN_RING( 2 );
2742 RADEON_WAIT_UNTIL_2D_IDLE();
2743 ADVANCE_RING();
2744 break;
2745 case RADEON_WAIT_3D:
2746 BEGIN_RING( 2 );
2747 RADEON_WAIT_UNTIL_3D_IDLE();
2748 ADVANCE_RING();
2749 break;
2750 case RADEON_WAIT_2D|RADEON_WAIT_3D:
2751 BEGIN_RING( 2 );
2752 RADEON_WAIT_UNTIL_IDLE();
2753 ADVANCE_RING();
2754 break;
2755 default:
2756 return DRM_ERR(EINVAL);
2757 }
2758
2759 return 0;
2760 }
2761
2762 static int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
2763 {
2764 DRM_DEVICE;
2765 drm_radeon_private_t *dev_priv = dev->dev_private;
2766 drm_file_t *filp_priv;
2767 drm_device_dma_t *dma = dev->dma;
2768 drm_buf_t *buf = NULL;
2769 int idx;
2770 drm_radeon_cmd_buffer_t cmdbuf;
2771 drm_radeon_cmd_header_t header;
2772 int orig_nbox, orig_bufsz;
2773 char *kbuf = NULL;
2774
2775 LOCK_TEST_WITH_RETURN( dev, filp );
2776
2777 if ( !dev_priv ) {
2778 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
2779 return DRM_ERR(EINVAL);
2780 }
2781
2782 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
2783
2784 DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t __user *)data,
2785 sizeof(cmdbuf) );
2786
2787 RING_SPACE_TEST_WITH_RETURN( dev_priv );
2788 VB_AGE_TEST_WITH_RETURN( dev_priv );
2789
2790 if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) {
2791 return DRM_ERR(EINVAL);
2792 }
2793
2794 /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
2795 * races between checking values and using those values in other code,
2796 * and simply to avoid a lot of function calls to copy in data.
2797 */
2798 orig_bufsz = cmdbuf.bufsz;
2799 if (orig_bufsz != 0) {
2800 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
2801 if (kbuf == NULL)
2802 return DRM_ERR(ENOMEM);
2803 if (DRM_COPY_FROM_USER(kbuf, cmdbuf.buf, cmdbuf.bufsz)) {
2804 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2805 return DRM_ERR(EFAULT);
2806 }
2807 cmdbuf.buf = kbuf;
2808 }
2809
2810 orig_nbox = cmdbuf.nbox;
2811
2812 if (dev_priv->microcode_version == UCODE_R300) {
2813 int temp;
2814 temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
2815
2816 if (orig_bufsz != 0)
2817 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2818
2819 return temp;
2820 }
2821
2822 /* microcode_version != r300 */
2823 while ( cmdbuf.bufsz >= sizeof(header) ) {
2824
2825 header.i = *(int *)cmdbuf.buf;
2826 cmdbuf.buf += sizeof(header);
2827 cmdbuf.bufsz -= sizeof(header);
2828
2829 switch (header.header.cmd_type) {
2830 case RADEON_CMD_PACKET:
2831 DRM_DEBUG("RADEON_CMD_PACKET\n");
2832 if (radeon_emit_packets( dev_priv, filp_priv, header, &cmdbuf )) {
2833 DRM_ERROR("radeon_emit_packets failed\n");
2834 goto err;
2835 }
2836 break;
2837
2838 case RADEON_CMD_SCALARS:
2839 DRM_DEBUG("RADEON_CMD_SCALARS\n");
2840 if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
2841 DRM_ERROR("radeon_emit_scalars failed\n");
2842 goto err;
2843 }
2844 break;
2845
2846 case RADEON_CMD_VECTORS:
2847 DRM_DEBUG("RADEON_CMD_VECTORS\n");
2848 if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
2849 DRM_ERROR("radeon_emit_vectors failed\n");
2850 goto err;
2851 }
2852 break;
2853
2854 case RADEON_CMD_DMA_DISCARD:
2855 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
2856 idx = header.dma.buf_idx;
2857 if ( idx < 0 || idx >= dma->buf_count ) {
2858 DRM_ERROR( "buffer index %d (of %d max)\n",
2859 idx, dma->buf_count - 1 );
2860 goto err;
2861 }
2862
2863 buf = dma->buflist[idx];
2864 if ( buf->filp != filp || buf->pending ) {
2865 DRM_ERROR( "bad buffer %p %p %d\n",
2866 buf->filp, filp, buf->pending);
2867 goto err;
2868 }
2869
2870 radeon_cp_discard_buffer( dev, buf );
2871 break;
2872
2873 case RADEON_CMD_PACKET3:
2874 DRM_DEBUG("RADEON_CMD_PACKET3\n");
2875 if (radeon_emit_packet3( dev, filp_priv, &cmdbuf )) {
2876 DRM_ERROR("radeon_emit_packet3 failed\n");
2877 goto err;
2878 }
2879 break;
2880
2881 case RADEON_CMD_PACKET3_CLIP:
2882 DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
2883 if (radeon_emit_packet3_cliprect( dev, filp_priv, &cmdbuf, orig_nbox )) {
2884 DRM_ERROR("radeon_emit_packet3_clip failed\n");
2885 goto err;
2886 }
2887 break;
2888
2889 case RADEON_CMD_SCALARS2:
2890 DRM_DEBUG("RADEON_CMD_SCALARS2\n");
2891 if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
2892 DRM_ERROR("radeon_emit_scalars2 failed\n");
2893 goto err;
2894 }
2895 break;
2896
2897 case RADEON_CMD_WAIT:
2898 DRM_DEBUG("RADEON_CMD_WAIT\n");
2899 if (radeon_emit_wait( dev, header.wait.flags )) {
2900 DRM_ERROR("radeon_emit_wait failed\n");
2901 goto err;
2902 }
2903 break;
2904 default:
2905 DRM_ERROR("bad cmd_type %d at %p\n",
2906 header.header.cmd_type,
2907 cmdbuf.buf - sizeof(header));
2908 goto err;
2909 }
2910 }
2911
2912 if (orig_bufsz != 0)
2913 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2914
2915 DRM_DEBUG("DONE\n");
2916 COMMIT_RING();
2917 return 0;
2918
2919 err:
2920 if (orig_bufsz != 0)
2921 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2922 return DRM_ERR(EINVAL);
2923 }
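/* Illustrative userspace sketch (not driver code, hence #if 0): the
 * smallest possible stream for the CMDBUF ioctl above, a single
 * RADEON_CMD_WAIT for 3D idle. emit_wait_3d() is an assumption for
 * illustration.
 */
#if 0
#include <string.h>
#include <xf86drm.h>
#include "radeon_drm.h"

static int emit_wait_3d(int fd)
{
	drm_radeon_cmd_header_t header;
	drm_radeon_cmd_buffer_t cmdbuf;

	memset(&header, 0, sizeof(header));
	header.wait.cmd_type = RADEON_CMD_WAIT;
	header.wait.flags = RADEON_WAIT_3D;

	cmdbuf.buf = (char *)&header;
	cmdbuf.bufsz = sizeof(header);
	cmdbuf.nbox = 0;
	cmdbuf.boxes = NULL;

	return drmCommandWrite(fd, DRM_RADEON_CMDBUF,
			       &cmdbuf, sizeof(cmdbuf));
}
#endif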
2924
2925
2926
2927 static int radeon_cp_getparam( DRM_IOCTL_ARGS )
2928 {
2929 DRM_DEVICE;
2930 drm_radeon_private_t *dev_priv = dev->dev_private;
2931 drm_radeon_getparam_t param;
2932 int value;
2933
2934 if ( !dev_priv ) {
2935 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
2936 return DRM_ERR(EINVAL);
2937 }
2938
2939 DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t __user *)data,
2940 sizeof(param) );
2941
2942 DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
2943
2944 switch( param.param ) {
2945 case RADEON_PARAM_GART_BUFFER_OFFSET:
2946 value = dev_priv->gart_buffers_offset;
2947 break;
2948 case RADEON_PARAM_LAST_FRAME:
2949 dev_priv->stats.last_frame_reads++;
2950 value = GET_SCRATCH( 0 );
2951 break;
2952 case RADEON_PARAM_LAST_DISPATCH:
2953 value = GET_SCRATCH( 1 );
2954 break;
2955 case RADEON_PARAM_LAST_CLEAR:
2956 dev_priv->stats.last_clear_reads++;
2957 value = GET_SCRATCH( 2 );
2958 break;
2959 case RADEON_PARAM_IRQ_NR:
2960 value = dev->irq;
2961 break;
2962 case RADEON_PARAM_GART_BASE:
2963 value = dev_priv->gart_vm_start;
2964 break;
2965 case RADEON_PARAM_REGISTER_HANDLE:
2966 value = dev_priv->mmio_offset;
2967 break;
2968 case RADEON_PARAM_STATUS_HANDLE:
2969 value = dev_priv->ring_rptr_offset;
2970 break;
2971 #if BITS_PER_LONG == 32
2972 /*
2973 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
2974 * pointer which can't fit into an int-sized variable. According to
2975 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
2976 * not supporting it shouldn't be a problem. If the same functionality
2977 * is needed on 64-bit platforms, a new ioctl() would have to be added,
2978 * so backwards-compatibility for the embedded platforms can be
2979 * maintained. --davidm 4-Feb-2004.
2980 */
2981 case RADEON_PARAM_SAREA_HANDLE:
2982 /* The lock is the first dword in the sarea. */
2983 value = (long)dev->lock.hw_lock;
2984 break;
2985 #endif
2986 case RADEON_PARAM_GART_TEX_HANDLE:
2987 value = dev_priv->gart_textures_offset;
2988 break;
2989 default:
2990 return DRM_ERR(EINVAL);
2991 }
2992
2993 if ( DRM_COPY_TO_USER( param.value, &value, sizeof(int) ) ) {
2994 DRM_ERROR( "copy_to_user\n" );
2995 return DRM_ERR(EFAULT);
2996 }
2997
2998 return 0;
2999 }
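/* Illustrative userspace sketch (not driver code, hence #if 0): reading
 * one parameter back through the GETPARAM ioctl above. get_irq_nr() is
 * an assumption for illustration.
 */
#if 0
#include <xf86drm.h>
#include "radeon_drm.h"

static int get_irq_nr(int fd, int *irq)
{
	drm_radeon_getparam_t gp;

	gp.param = RADEON_PARAM_IRQ_NR;
	gp.value = irq;		/* the kernel copies the int result here */

	return drmCommandWriteRead(fd, DRM_RADEON_GETPARAM,
				   &gp, sizeof(gp));
}
#endif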
3000
3001 static int radeon_cp_setparam( DRM_IOCTL_ARGS ) {
3002 DRM_DEVICE;
3003 drm_radeon_private_t *dev_priv = dev->dev_private;
3004 drm_file_t *filp_priv;
3005 drm_radeon_setparam_t sp;
3006 struct drm_radeon_driver_file_fields *radeon_priv;
3007
3008 if ( !dev_priv ) {
3009 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
3010 return DRM_ERR( EINVAL );
3011 }
3012
3013 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
3014
3015 DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t __user * )data,
3016 sizeof( sp ) );
3017
3018 switch( sp.param ) {
3019 case RADEON_SETPARAM_FB_LOCATION:
3020 radeon_priv = filp_priv->driver_priv;
3021 radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
3022 break;
3023 case RADEON_SETPARAM_SWITCH_TILING:
3024 if (sp.value == 0) {
3025 DRM_DEBUG( "color tiling disabled\n" );
3026 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3027 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3028 dev_priv->sarea_priv->tiling_enabled = 0;
3029 }
3030 else if (sp.value == 1) {
3031 DRM_DEBUG( "color tiling enabled\n" );
3032 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
3033 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
3034 dev_priv->sarea_priv->tiling_enabled = 1;
3035 }
3036 break;
3037 default:
3038 DRM_DEBUG( "Invalid parameter %d\n", sp.param );
3039 return DRM_ERR( EINVAL );
3040 }
3041
3042 return 0;
3043 }
3044
3045 /* When a client dies:
3046 * - Check for and clean up flipped page state
3047 * - Free any allocated GART memory.
3048 *
3049 * DRM infrastructure takes care of reclaiming dma buffers.
3050 */
3051 void radeon_driver_prerelease(drm_device_t *dev, DRMFILE filp)
3052 {
3053 if ( dev->dev_private ) {
3054 drm_radeon_private_t *dev_priv = dev->dev_private;
3055 if ( dev_priv->page_flipping ) {
3056 radeon_do_cleanup_pageflip( dev );
3057 }
3058 radeon_mem_release( filp, dev_priv->gart_heap );
3059 radeon_mem_release( filp, dev_priv->fb_heap );
3060 radeon_surfaces_release(filp, dev_priv);
3061 }
3062 }
3063
3064 void radeon_driver_pretakedown(drm_device_t *dev)
3065 {
3066 radeon_do_release(dev);
3067 }
3068
3069 int radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv)
3070 {
3071 drm_radeon_private_t *dev_priv = dev->dev_private;
3072 struct drm_radeon_driver_file_fields *radeon_priv;
3073
3074 radeon_priv = (struct drm_radeon_driver_file_fields *)drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
3075
3076 if (!radeon_priv)
3077 return -ENOMEM;
3078
3079 filp_priv->driver_priv = radeon_priv;
3080 if ( dev_priv )
3081 radeon_priv->radeon_fb_delta = dev_priv->fb_location;
3082 else
3083 radeon_priv->radeon_fb_delta = 0;
3084 return 0;
3085 }
3086
3087
3088 void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_priv)
3089 {
3090 struct drm_radeon_driver_file_fields *radeon_priv = filp_priv->driver_priv;
3091
3092 drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
3093 }
3094
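/* Each entry below is { handler, auth_needed, root_only } (assumption:
 * the drm_ioctl_desc_t layout of this era). Note that INDIRECT is
 * marked root-only, matching the "privileged clients" comment above
 * radeon_cp_dispatch_indirect's call site.
 */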
3095 drm_ioctl_desc_t radeon_ioctls[] = {
3096 [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 },
3097 [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = { radeon_cp_start, 1, 1 },
3098 [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 },
3099 [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 },
3100 [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 },
3101 [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = { radeon_cp_resume, 1, 0 },
3102 [DRM_IOCTL_NR(DRM_RADEON_RESET)] = { radeon_engine_reset, 1, 0 },
3103 [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 },
3104 [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 },
3105 [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 },
3106 [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 },
3107 [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 },
3108 [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 },
3109 [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 },
3110 [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 },
3111 [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = { radeon_cp_vertex2, 1, 0 },
3112 [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = { radeon_cp_cmdbuf, 1, 0 },
3113 [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = { radeon_cp_getparam, 1, 0 },
3114 [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = { radeon_cp_flip, 1, 0 },
3115 [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = { radeon_mem_alloc, 1, 0 },
3116 [DRM_IOCTL_NR(DRM_RADEON_FREE)] = { radeon_mem_free, 1, 0 },
3117 [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = { radeon_mem_init_heap, 1, 1 },
3118 [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 },
3119 [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 },
3120 [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = { radeon_cp_setparam, 1, 0 },
3121 [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = { radeon_surface_alloc, 1, 0 },
3122 [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = { radeon_surface_free, 1, 0 }
3123 };
3124
3125 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);