/* r128_state.c -- State support for r128 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
 */
/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"

/* ================================================================
 * CCE hardware state programming functions
 */

static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
				 drm_clip_rect_t * boxes, int count)
{
	u32 aux_sc_cntl = 0x00000000;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

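	/* Ring-space note (derived from the code below): each cliprect emits
	 * five dwords (a CCE_PACKET0 header plus four AUX scissor registers),
	 * and the trailing AUX_SC_CNTL write adds two more; at most three
	 * rects are programmed per call.
	 */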
	BEGIN_RING((count < 3 ? count : 3) * 5 + 2);

	if (count >= 1) {
		OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
		OUT_RING(boxes[0].x1);
		OUT_RING(boxes[0].x2 - 1);
		OUT_RING(boxes[0].y1);
		OUT_RING(boxes[0].y2 - 1);

		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
	}
	if (count >= 2) {
		OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
		OUT_RING(boxes[1].x1);
		OUT_RING(boxes[1].x2 - 1);
		OUT_RING(boxes[1].y1);
		OUT_RING(boxes[1].y2 - 1);

		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
	}
	if (count >= 3) {
		OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
		OUT_RING(boxes[2].x1);
		OUT_RING(boxes[2].x2 - 1);
		OUT_RING(boxes[2].y1);
		OUT_RING(boxes[2].y2 - 1);

		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
	}

	OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
	OUT_RING(aux_sc_cntl);

	ADVANCE_RING();
}

static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
	OUT_RING(ctx->scale_3d_cntl);

	ADVANCE_RING();
}

static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(13);

	OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
	OUT_RING(ctx->dst_pitch_offset_c);
	OUT_RING(ctx->dp_gui_master_cntl_c);
	OUT_RING(ctx->sc_top_left_c);
	OUT_RING(ctx->sc_bottom_right_c);
	OUT_RING(ctx->z_offset_c);
	OUT_RING(ctx->z_pitch_c);
	OUT_RING(ctx->z_sten_cntl_c);
	OUT_RING(ctx->tex_cntl_c);
	OUT_RING(ctx->misc_3d_state_cntl_reg);
	OUT_RING(ctx->texture_clr_cmp_clr_c);
	OUT_RING(ctx->texture_clr_cmp_msk_c);
	OUT_RING(ctx->fog_color_c);

	ADVANCE_RING();
}

static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(3);

	OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
	OUT_RING(ctx->setup_cntl);
	OUT_RING(ctx->pm4_vc_fpu_setup);

	ADVANCE_RING();
}

static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(5);

	OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
	OUT_RING(ctx->dp_write_mask);

	OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
	OUT_RING(ctx->sten_ref_mask_c);
	OUT_RING(ctx->plane_3d_mask_c);

	ADVANCE_RING();
}

static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
	OUT_RING(ctx->window_xy_offset);

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
	int i;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
			     2 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	OUT_RING(ctx->tex_size_pitch_c);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
		OUT_RING(tex->tex_offset[i]);
	}

	OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
	OUT_RING(ctx->constant_color_c);
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
	int i;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
		OUT_RING(tex->tex_offset[i]);
	}

	OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}

static void r128_emit_state(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);

	if (dirty & R128_UPLOAD_CORE) {
		r128_emit_core(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
	}

	if (dirty & R128_UPLOAD_CONTEXT) {
		r128_emit_context(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
	}

	if (dirty & R128_UPLOAD_SETUP) {
		r128_emit_setup(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
	}

	if (dirty & R128_UPLOAD_MASKS) {
		r128_emit_masks(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
	}

	if (dirty & R128_UPLOAD_WINDOW) {
		r128_emit_window(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
	}

	if (dirty & R128_UPLOAD_TEX0) {
		r128_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
	}

	if (dirty & R128_UPLOAD_TEX1) {
		r128_emit_tex1(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
	}

	/* Turn off the texture cache flushing */
	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;

	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
}

#if R128_PERFORMANCE_BOXES
/* ================================================================
 * Performance monitoring functions
 */

static void r128_clear_box(drm_r128_private_t * dev_priv,
			   int x, int y, int w, int h, int r, int g, int b)
{
	u32 pitch, offset;
	u32 fb_bpp, color;
	RING_LOCALS;

	switch (dev_priv->fb_bpp) {
	case 16:
		fb_bpp = R128_GMC_DST_16BPP;
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case 24:
		fb_bpp = R128_GMC_DST_24BPP;
		color = ((r << 16) | (g << 8) | b);
		break;
	case 32:
		fb_bpp = R128_GMC_DST_32BPP;
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	default:
		return;
	}

	offset = dev_priv->back_offset;
	pitch = dev_priv->back_pitch >> 3;

	BEGIN_RING(6);

	OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
	OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_SOLID_COLOR |
		 fb_bpp |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_P |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);

	OUT_RING((pitch << 21) | (offset >> 5));
	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}

static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
{
	if (atomic_read(&dev_priv->idle_count) == 0) {
		r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
	} else {
		atomic_set(&dev_priv->idle_count, 0);
	}
}

#endif

/* ================================================================
 * CCE command dispatch functions
 */

static void r128_print_dirty(const char *msg, unsigned int flags)
{
	DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
		 msg,
		 flags,
		 (flags & R128_UPLOAD_CORE) ? "core, " : "",
		 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
		 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
		 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
		 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
		 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
		 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
		 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
		 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}

static void r128_cce_dispatch_clear(drm_device_t * dev,
				    drm_r128_clear_t * clear)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	int i;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

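	/* With page flipping active and the back buffer currently on screen,
	 * swap the FRONT/BACK clear flags so each clear lands on the surface
	 * the client actually means.
	 */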
	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(R128_FRONT | R128_BACK);
		if (tmp & R128_FRONT)
			flags |= R128_BACK;
		if (tmp & R128_BACK)
			flags |= R128_FRONT;
	}

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
			  pbox[i].x1, pbox[i].y1, pbox[i].x2,
			  pbox[i].y2, flags);

		if (flags & (R128_FRONT | R128_BACK)) {
			BEGIN_RING(2);

			OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
			OUT_RING(clear->color_mask);

			ADVANCE_RING();
		}

		if (flags & R128_FRONT) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_BACK) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_DEPTH) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(clear->clear_depth);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}
	}
}

static void r128_cce_dispatch_swap(drm_device_t * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS |
			 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(dev_priv->front_pitch_offset_c);
		} else {
			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(dev_priv->back_pitch_offset_c);
		}

		OUT_RING((x << 16) | y);
		OUT_RING((x << 16) | y);
		OUT_RING((w << 16) | h);

		ADVANCE_RING();
	}

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

static void r128_cce_dispatch_flip(drm_device_t * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

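	/* Wait for any outstanding flip to finish, then retarget the CRTC at
	 * whichever buffer is not currently being displayed.
	 */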
	BEGIN_RING(4);

	R128_WAIT_UNTIL_PAGE_FLIPPED();
	OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));

	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
	} else {
		OUT_RING(dev_priv->front_offset);
	}

	ADVANCE_RING();

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
	    1 - dev_priv->current_page;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = buf->bus_address;
	int size = buf->used;
	int prim = buf_priv->prim;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);

	if (0)
		r128_print_dirty("dispatch_vertex", sarea_priv->dirty);

	if (buf->used) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
			r128_emit_state(dev_priv);
		}

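		/* Only three scissor rects can be programmed at once (see
		 * r128_emit_clip_rects), so resubmit the same vertex buffer
		 * once per group of up to three cliprects.
		 */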
		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			/* Emit the vertex buffer rendering commands */
			BEGIN_RING(5);

			OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
			OUT_RING(offset);
			OUT_RING(size);
			OUT_RING(format);
			OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
				 (size << R128_CCE_VC_CNTL_NUM_SHIFT));

			ADVANCE_RING();

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

static void r128_cce_dispatch_indirect(drm_device_t * dev,
				       drm_buf_t * buf, int start, int end)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;
	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		int offset = buf->bus_address + start;
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CCE packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
		}

		buf_priv->dispatched = 1;

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the indirect buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;
}

static void r128_cce_dispatch_indices(drm_device_t * dev,
				      drm_buf_t * buf,
				      int start, int end, int count)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
	int prim = buf_priv->prim;
	u32 *data;
	int dwords;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);

	if (0)
		r128_print_dirty("dispatch_indices", sarea_priv->dirty);

	if (start != end) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
			r128_emit_state(dev_priv);
		}

		dwords = (end - start + 3) / sizeof(u32);

		data = (u32 *) ((char *)dev->agp_buffer_map->handle
				+ buf->offset + start);

		data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
						  dwords - 2));

		data[1] = cpu_to_le32(offset);
		data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
		data[3] = cpu_to_le32(format);
		data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
				       (count << 16)));

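		/* An odd index count leaves the last 16-bit index sharing a
		 * dword with unused space; clear the unused half (which half
		 * depends on byte order) so no stale index gets drawn.
		 */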
		if (count & 0x1) {
#ifdef __LITTLE_ENDIAN
			data[dwords - 1] &= 0x0000ffff;
#else
			data[dwords - 1] &= 0xffff0000;
#endif
		}

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			r128_cce_dispatch_indirect(dev, buf, start, end);

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

static int r128_cce_dispatch_blit(DRMFILE filp,
				  drm_device_t * dev, drm_r128_blit_t * blit)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two. Thus, we'll
	 * use a shift instead.
	 */
	switch (blit->format) {
	case R128_DATATYPE_ARGB8888:
		dword_shift = 0;
		break;
	case R128_DATATYPE_ARGB1555:
	case R128_DATATYPE_RGB565:
	case R128_DATATYPE_ARGB4444:
	case R128_DATATYPE_YVYU422:
	case R128_DATATYPE_VYUY422:
		dword_shift = 1;
		break;
	case R128_DATATYPE_CI8:
	case R128_DATATYPE_RGB8:
		dword_shift = 2;
		break;
	default:
		DRM_ERROR("invalid blit format %d\n", blit->format);
		return DRM_ERR(EINVAL);
	}

	/* Flush the pixel cache, and mark the contents as Read Invalid.
	 * This ensures no pixel data gets mixed up with the texture
	 * data from the host data blit, otherwise part of the texture
	 * image may be corrupted.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", blit->idx);
		return DRM_ERR(EINVAL);
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;

	data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

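	/* Build a HOSTDATA_BLT packet header in place: one packet dword plus
	 * seven setup dwords, followed by the pixel data already present in
	 * the buffer; buf->used below accounts for all (dwords + 8) dwords.
	 */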
	data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
	data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
			       R128_GMC_BRUSH_NONE |
			       (blit->format << 8) |
			       R128_GMC_SRC_DATATYPE_COLOR |
			       R128_ROP3_S |
			       R128_DP_SRC_SOURCE_HOST_DATA |
			       R128_GMC_CLR_CMP_CNTL_DIS |
			       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));

	data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
	data[3] = cpu_to_le32(0xffffffff);
	data[4] = cpu_to_le32(0xffffffff);
	data[5] = cpu_to_le32((blit->y << 16) | blit->x);
	data[6] = cpu_to_le32((blit->height << 16) | blit->width);
	data[7] = cpu_to_le32(dwords);

	buf->used = (dwords + 8) * sizeof(u32);

	r128_cce_dispatch_indirect(dev, buf, 0, buf->used);

	/* Flush the pixel cache after the blit completes. This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	return 0;
}

/* ================================================================
 * Tiled depth buffer management
 *
 * FIXME: These should all set the destination write mask for when we
 * have hardware stencil support.
 */

static int r128_cce_dispatch_write_span(drm_device_t * dev,
					drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	u32 *buffer;
	u8 *mask;
	int i, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
		return DRM_ERR(EFAULT);
	}
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
		return DRM_ERR(EFAULT);
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
	if (buffer == NULL)
		return DRM_ERR(ENOMEM);
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		drm_free(buffer, buffer_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

	mask_size = depth->n * sizeof(u8);
	if (depth->mask) {
		mask = drm_alloc(mask_size, DRM_MEM_BUFS);
		if (mask == NULL) {
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			return DRM_ERR(ENOMEM);
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			drm_free(mask, mask_size, DRM_MEM_BUFS);
			return DRM_ERR(EFAULT);
		}

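		/* Paint each depth value as a 1x1 solid fill, walking x along
		 * the span and skipping pixels whose mask byte is zero.
		 */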
		for (i = 0; i < count; i++, x++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x << 16) | y);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		drm_free(mask, mask_size, DRM_MEM_BUFS);
	} else {
		for (i = 0; i < count; i++, x++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x << 16) | y);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	drm_free(buffer, buffer_size, DRM_MEM_BUFS);

	return 0;
}

static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
					  drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	u32 *buffer;
	u8 *mask;
	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
	if (x == NULL) {
		return DRM_ERR(ENOMEM);
	}
	y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
	if (y == NULL) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		return DRM_ERR(ENOMEM);
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}
	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
	if (buffer == NULL) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(ENOMEM);
	}
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		drm_free(buffer, buffer_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

	if (depth->mask) {
		mask_size = depth->n * sizeof(u8);
		mask = drm_alloc(mask_size, DRM_MEM_BUFS);
		if (mask == NULL) {
			drm_free(x, xbuf_size, DRM_MEM_BUFS);
			drm_free(y, ybuf_size, DRM_MEM_BUFS);
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			return DRM_ERR(ENOMEM);
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			drm_free(x, xbuf_size, DRM_MEM_BUFS);
			drm_free(y, ybuf_size, DRM_MEM_BUFS);
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			drm_free(mask, mask_size, DRM_MEM_BUFS);
			return DRM_ERR(EFAULT);
		}

		for (i = 0; i < count; i++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x[i] << 16) | y[i]);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		drm_free(mask, mask_size, DRM_MEM_BUFS);
	} else {
		for (i = 0; i < count; i++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x[i] << 16) | y[i]);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	drm_free(x, xbuf_size, DRM_MEM_BUFS);
	drm_free(y, ybuf_size, DRM_MEM_BUFS);
	drm_free(buffer, buffer_size, DRM_MEM_BUFS);

	return 0;
}

static int r128_cce_dispatch_read_span(drm_device_t * dev,
				       drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
		return DRM_ERR(EFAULT);
	}
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
		return DRM_ERR(EFAULT);
	}

	BEGIN_RING(7);

	OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
	OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
		 R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_NONE |
		 (dev_priv->depth_fmt << 8) |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_S |
		 R128_DP_SRC_SOURCE_MEMORY |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

	OUT_RING(dev_priv->depth_pitch_offset_c);
	OUT_RING(dev_priv->span_pitch_offset_c);

	OUT_RING((x << 16) | y);
	OUT_RING((0 << 16) | 0);
	OUT_RING((count << 16) | 1);

	ADVANCE_RING();

	return 0;
}

static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
					 drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	int i, xbuf_size, ybuf_size;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if (count > dev_priv->depth_pitch) {
		count = dev_priv->depth_pitch;
	}

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
	if (x == NULL) {
		return DRM_ERR(ENOMEM);
	}
	y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
	if (y == NULL) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		return DRM_ERR(ENOMEM);
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}
	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

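	/* One small blit per requested pixel: copy the depth value at
	 * (x[i], y[i]) into column i of the span buffer so the results end
	 * up packed contiguously.
	 */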
	for (i = 0; i < count; i++) {
		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->depth_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

		OUT_RING(dev_priv->depth_pitch_offset_c);
		OUT_RING(dev_priv->span_pitch_offset_c);

		OUT_RING((x[i] << 16) | y[i]);
		OUT_RING((i << 16) | 0);
		OUT_RING((1 << 16) | 1);

		ADVANCE_RING();
	}

	drm_free(x, xbuf_size, DRM_MEM_BUFS);
	drm_free(y, ybuf_size, DRM_MEM_BUFS);

	return 0;
}

/* ================================================================
 * Polygon stipple
 */

static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

	BEGIN_RING(33);

	OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
	for (i = 0; i < 32; i++) {
		OUT_RING(stipple[i]);
	}

	ADVANCE_RING();
}

/* ================================================================
 * IOCTL functions
 */

static int r128_cce_clear(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_clear_t clear;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data,
				 sizeof(clear));

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_clear(dev, &clear);
	COMMIT_RING();

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;

	return 0;
}

static int r128_do_init_pageflip(drm_device_t * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
	dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL,
		   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

static int r128_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);

	if (dev_priv->current_page != 0) {
		r128_cce_dispatch_flip(dev);
		COMMIT_RING();
	}

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */

static int r128_cce_flip(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (!dev_priv->page_flipping)
		r128_do_init_pageflip(dev);

	r128_cce_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}

static int r128_cce_swap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_swap(dev);
	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
					R128_UPLOAD_MASKS);

	COMMIT_RING();
	return 0;
}

static int r128_cce_vertex(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_vertex_t vertex;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
				 sizeof(vertex));

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);

	if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}
	if (vertex.prim < 0 ||
	    vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", vertex.prim);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex.idx];
	buf_priv = buf->dev_private;

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex.idx);
		return DRM_ERR(EINVAL);
	}

	buf->used = vertex.count;
	buf_priv->prim = vertex.prim;
	buf_priv->discard = vertex.discard;

	r128_cce_dispatch_vertex(dev, buf);

	COMMIT_RING();
	return 0;
}

static int r128_cce_indices(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indices_t elts;
	int count;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
				 sizeof(elts));

	DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
		  elts.idx, elts.start, elts.end, elts.discard);

	if (elts.idx < 0 || elts.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}
	if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", elts.prim);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts.idx];
	buf_priv = buf->dev_private;

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts.idx);
		return DRM_ERR(EINVAL);
	}

	count = (elts.end - elts.start) / sizeof(u16);
	elts.start -= R128_INDEX_PRIM_OFFSET;

	if (elts.start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
		return DRM_ERR(EINVAL);
	}
	if (elts.start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
		return DRM_ERR(EINVAL);
	}

	buf->used = elts.end;
	buf_priv->prim = elts.prim;
	buf_priv->discard = elts.discard;

	r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count);

	COMMIT_RING();
	return 0;
}

static int r128_cce_blit(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_blit_t blit;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data,
				 sizeof(blit));

	DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx);

	if (blit.idx < 0 || blit.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  blit.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = r128_cce_dispatch_blit(filp, dev, &blit);

	COMMIT_RING();
	return ret;
}

static int r128_cce_depth(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_depth_t depth;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data,
				 sizeof(depth));

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	ret = DRM_ERR(EINVAL);
	switch (depth.func) {
	case R128_WRITE_SPAN:
		ret = r128_cce_dispatch_write_span(dev, &depth);
		break;
	case R128_WRITE_PIXELS:
		ret = r128_cce_dispatch_write_pixels(dev, &depth);
		break;
	case R128_READ_SPAN:
		ret = r128_cce_dispatch_read_span(dev, &depth);
		break;
	case R128_READ_PIXELS:
		ret = r128_cce_dispatch_read_pixels(dev, &depth);
		break;
	}

	COMMIT_RING();
	return ret;
}

static int r128_cce_stipple(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
				 sizeof(stipple));

	if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
		return DRM_ERR(EFAULT);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	r128_cce_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}

static int r128_cce_indirect(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indirect_t indirect;
#if 0
	RING_LOCALS;
#endif

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
				 sizeof(indirect));

	DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
		  indirect.idx, indirect.start, indirect.end, indirect.discard);

	if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}

	buf = dma->buflist[indirect.idx];
	buf_priv = buf->dev_private;

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect.idx);
		return DRM_ERR(EINVAL);
	}

	if (indirect.start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect.start, buf->used);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect.end;
	buf_priv->discard = indirect.discard;

#if 0
	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();
#endif

	/* Dispatch the indirect buffer full of commands from the
	 * X server. This is insecure and is thus only available to
	 * privileged clients.
	 */
	r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end);

	COMMIT_RING();
	return 0;
}

static int r128_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
				 sizeof(param));

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param.param) {
	case R128_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}

void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_r128_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			r128_do_cleanup_pageflip(dev);
		}
	}
}

void r128_driver_lastclose(drm_device_t * dev)
{
	r128_do_cleanup_cce(dev);
}

drm_ioctl_desc_t r128_ioctls[] = {
	[DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
};

int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);