drm/radeon/kms: add r600 KMS support
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_share.h"
#include "r600d.h"
#include "avivod.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
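
/* Descriptive note: the reloc lookup is dispatched through a function
 * pointer so the same checker code serves both submission paths.  It
 * defaults to the KMS/memory-managed variant; r600_cs_legacy_init() at
 * the bottom of this file switches it to the non-MM variant for the
 * legacy (UMS) command submission path. */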
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header dword in the ib chunk
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size or if the packet is unknown.
 **/
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = ib_chunk->kdata[idx];
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
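
/* For reference, the PM4 header fields decoded above (a sketch of the
 * usual r600 PM4 layout implemented by the CP_PACKET_GET_* macros in
 * r600d.h; check that header for the authoritative definitions):
 *   bits [31:30]	packet type (0, 2 or 3)
 *   bits [29:16]	count (dwords following the header, minus one)
 *   bits [15:0]	type 0: starting register dword offset
 *   bits [15:8]	type 3: opcode
 */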

/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the relocation information
 *
 * Check that the next packet is a relocation packet3 and return the
 * corresponding validated relocation entry.
 **/
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
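
/* Descriptive note on the relocation convention used above: userspace
 * follows each packet that needs a relocation with a PACKET3_NOP whose
 * payload dword is an index into the relocation chunk.  Each relocation
 * entry is assumed to span 4 dwords, hence the idx / 4 above. */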

/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the relocation information
 *
 * Check that the next packet is a relocation packet3 and compute the GPU
 * offset directly from the relocation chunk (no memory manager involved).
 **/
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = &p->relocs[0];
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}
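
/* Note: unlike the mm variant, there is no validated bo list here; the
 * 64-bit GPU offset is read straight out of the relocation chunk (low
 * dword at idx + 0, high dword at idx + 3) into the single reloc slot. */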

static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case AVIVO_D2MODE_VLINE_START_END:
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
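
/* Descriptive note: the only type-0 register writes accepted are the
 * display vline start/end registers, presumably so userspace can program
 * vline waits; every other register is rejected. */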

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
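
/* Illustrative example (assumed layout, using the usual PACKET0 helper):
 * a type-0 packet writing two consecutive registers starting at reg R is
 *   dw0: PACKET0(R, 1)	header, count = 1 (two data dwords follow)
 *   dw1: value for R
 *   dw2: value for R + 4
 * which is why the loop above advances reg by 4 per iteration. */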

static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (ib_chunk->kdata[idx+0] & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (ib_chunk->kdata[idx+1] != 0xffffffff ||
		    ib_chunk->kdata[idx+2] != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			switch (reg) {
			case CP_COHER_BASE:
				/* use PACKET3_SURFACE_SYNC */
				return -EINVAL;
			default:
				break;
			}
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			switch (reg) {
			case DB_DEPTH_BASE:
			case CB_COLOR0_BASE:
			case CB_COLOR1_BASE:
			case CB_COLOR2_BASE:
			case CB_COLOR3_BASE:
			case CB_COLOR4_BASE:
			case CB_COLOR5_BASE:
			case CB_COLOR6_BASE:
			case CB_COLOR7_BASE:
			case SQ_PGM_START_FS:
			case SQ_PGM_START_ES:
			case SQ_PGM_START_VS:
			case SQ_PGM_START_GS:
			case SQ_PGM_START_PS:
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_CONTEXT_REG "
						  "0x%04X\n", reg);
					return -EINVAL;
				}
				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case VGT_DMA_BASE:
			case VGT_DMA_BASE_HI:
				/* These should be handled by DRAW_INDEX packet 3 */
			case VGT_STRMOUT_BASE_OFFSET_0:
			case VGT_STRMOUT_BASE_OFFSET_1:
			case VGT_STRMOUT_BASE_OFFSET_2:
			case VGT_STRMOUT_BASE_OFFSET_3:
			case VGT_STRMOUT_BASE_OFFSET_HI_0:
			case VGT_STRMOUT_BASE_OFFSET_HI_1:
			case VGT_STRMOUT_BASE_OFFSET_HI_2:
			case VGT_STRMOUT_BASE_OFFSET_HI_3:
			case VGT_STRMOUT_BUFFER_BASE_0:
			case VGT_STRMOUT_BUFFER_BASE_1:
			case VGT_STRMOUT_BUFFER_BASE_2:
			case VGT_STRMOUT_BUFFER_BASE_3:
			case VGT_STRMOUT_BUFFER_OFFSET_0:
			case VGT_STRMOUT_BUFFER_OFFSET_1:
			case VGT_STRMOUT_BUFFER_OFFSET_2:
			case VGT_STRMOUT_BUFFER_OFFSET_3:
				/* These should be handled by STRMOUT_BUFFER packet 3 */
				DRM_ERROR("bad context reg: 0x%08x\n", reg);
				return -EINVAL;
			default:
				break;
			}
		}
		break;
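		/* Illustrative stream fragment (assumed userspace layout):
		 * a context register write whose value needs relocating is
		 * followed by a NOP carrying the reloc index, e.g.
		 *   PACKET3(PACKET3_SET_CONTEXT_REG, 1)
		 *   (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2
		 *   0x00000000    <- patched above with bo offset >> 8
		 *   PACKET3(PACKET3_NOP, 0)
		 *   index into the relocation chunk
		 */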
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
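		/* Note: each SET_RESOURCE entry is 7 dwords (hence the
		 * count % 7 check above); dword 6 of an entry carries the
		 * SQ_VTX_CONSTANT_TYPE field used to tell texture resources
		 * from vertex buffers. */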
	case PACKET3_SET_ALU_CONST:
		start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
		    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
			DRM_ERROR("bad SET_ALU_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}
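
/* Top-level IB walk: p->idx is advanced past each packet header plus its
 * payload (count + 2 dwords in total), so the reloc helpers above always
 * see p->idx pointing at the next unparsed packet. */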

static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}
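
/* The legacy path has no GEM objects to validate, so a single zeroed
 * radeon_cs_reloc is enough: r600_cs_packet_next_reloc_nomm() rewrites
 * its gpu_offset in place for every relocation it encounters. */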

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	int r;

	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
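
/* Descriptive note: r600_cs_legacy() is the entry point for the legacy
 * (non-KMS) submission path.  It validates the stream against a fake ib
 * with rdev == NULL; the caller is assumed to supply the ib buffer in
 * *ib and to dispatch the checked, relocated stream itself. */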

void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}