drm/radeon/kms: cleanup - remove radeon_share.h
drivers/gpu/drm/radeon/r300.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"

#include "r300_reg_safe.h"

/* r300,r350,rv350,rv370,rv380 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_object *robj);

/* This file gathers functions specific to:
 * r300,r350,rv350,rv370,rv380
 *
 * Some of these functions might be used by newer ASICs.
 */
void r300_gpu_init(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);


/*
 * rv370,rv380 PCIE GART
 */
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround for a HW bug: do the flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
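		/* the otherwise unused read back posts the invalidate
		 * before GART_CNTL is restored below (assumed intent) */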
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
	mb();
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	}
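	/* each PCIE GART entry is 4 bytes, one per GPU page */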
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
	/* discard memory request outside of configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
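	/* A PCIE GART entry packs a 40-bit bus address into 32 bits:
	 * address bits [31:8] land in entry bits [23:0], address bits
	 * [39:32] in entry bits [31:24], and the low nibble 0xc holds
	 * the entry's control flags (assumed valid/snoop bits; they are
	 * not named in this driver). Worked example under that layout:
	 *   addr = 0x1_2345_6000
	 *   lower_32_bits(addr) >> 8           = 0x00234560
	 *   (upper_32_bits(addr) & 0xff) << 24 = 0x01000000
	 *   OR 0xc                       -> entry 0x0123456c
	 */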
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* On x86 we want this CPU endian; on powerpc without HW swappers
	 * it gets swapped on the way into VRAM, so no cpu_to_le32 is
	 * needed for VRAM tables. */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}

int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->family > CHIP_RV350) {
			rv370_pcie_gart_disable(rdev);
		} else {
			r100_pci_gart_disable(rdev);
		}
		return 0;
	}
#endif
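	/* pick the GART backend that matches the bus and patch the asic
	 * vtable, so later disable/flush/set_page calls go through the
	 * right implementation */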
	if (rdev->flags & RADEON_IS_PCIE) {
		rdev->asic->gart_disable = &rv370_pcie_gart_disable;
		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		return rv370_pcie_gart_enable(rdev);
	}
	if (rdev->flags & RADEON_IS_PCI) {
		rdev->asic->gart_disable = &r100_pci_gart_disable;
		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
		return r100_pci_gart_enable(rdev);
	}
	return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
int r300_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r300_gpu_init(rdev);
	r100_pci_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
	}

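	/* 0xFFFFFFFF is a "not placed yet" sentinel; radeon_mc_setup()
	 * below picks final VRAM/GTT locations for any range still left
	 * at the sentinel (assumed from its use here) */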
	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	r100_mc_disable_clients(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	r100_mc_setup(rdev);
	return 0;
}

void r300_mc_fini(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
		radeon_gart_table_vram_free(rdev);
	} else {
		r100_pci_gart_disable(rdev);
		radeon_gart_table_ram_free(rdev);
	}
	radeon_gart_fini(rdev);
}


/*
 * Fence emission
 */
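/* The sequence below quiesces the pipeline before the fence value lands:
 * idle the SC/US, flush the 3D caches, then wait for idle & clean. The
 * scratch register write makes the sequence number visible to the CPU,
 * and RADEON_SW_INT_FIRE raises the software interrupt that wakes fence
 * waiters (assumed from the standard radeon fence flow). */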
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today callers are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
	radeon_ring_write(rdev, (2 << 0));
	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
	radeon_ring_write(rdev, (1 << 0));
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


/*
 * Global GPU functions
 */
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_pages << PAGE_SHIFT;
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
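	/* one DMA packet moves at most 0x1FFFFF bytes, hence the
	 * DIV_ROUND_UP chunking above; each chunk costs 4 ring dwords
	 * (header, src, dst, size), matching the ring_lock budget of
	 * num_loops * 4 plus slack for the wait/fence packets */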
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(rdev, PACKET0(0x720, 2));
		radeon_ring_write(rdev, src_offset);
		radeon_ring_write(rdev, dst_offset);
		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}

void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(0x170C, 0));
	radeon_ring_write(rdev, 1 << 31);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 4)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	r100_hdp_reset(rdev);
	/* FIXME: does rv380 have only one pipe? */
	if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380 */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
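	/* strategy: repeatedly soft-reset via RBBM and, if the GA is
	 * still busy, poke its own reset registers, until RBBM_STATUS
	 * reports the VAP/GA busy bits clear; the CP is reinitialized
	 * afterwards only if it was running on entry */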
	for (i = 0; i < rdev->usec_timeout; i++) {
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		udelay(200);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
			/* GA still busy, soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
			WREG32(0x43E0, 0);
			WREG32(0x43E4, 0);
			WREG32(0x24AC, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}

int r300_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

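	/* RBBM_STATUS busy bits as used by the checks below (assumed
	 * meanings): 17/18/27 ~ 2D block, 20/26 ~ VAP/GA, 16 = CP,
	 * 31 = GUI active */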
	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		r300_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}


/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
		rdev->mc.vram_width = 128;
	} else {
		rdev->mc.vram_width = 64;
	}

	r100_vram_init_sizes(rdev);
}


/*
 * PCIE Lanes
 */

void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
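	/* while the link retrains, register reads can return all ones;
	 * poll until a sane value comes back (assumed behaviour) */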
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}


/*
 * CS functions
 */
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r100_cs_track *)p->track;
	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

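		/* fold the BO's actual tiling state into pitch bits
		 * [18:16]; whatever userspace put there is masked off
		 * below, so it cannot lie about tiling */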
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d)!\n",
				  ((ib_chunk->kdata[idx] >> 21) & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (ib_chunk->kdata[idx] & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((ib_chunk->kdata[idx] & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d)!\n",
				  (ib_chunk->kdata[idx] & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		break;
	case 0x4104:
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(ib_chunk->kdata[idx] & (1 << i));
			track->textures[i].enabled = enabled;
		}
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
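		/* cpp below is bytes per texel for this format, used
		 * later by the track checker (r100_cs_track_check) to
		 * bound texture reads against the backing BO's size */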
		switch ((ib_chunk->kdata[idx] & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_DXT1:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 4;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (ib_chunk->kdata[idx] & 0x1F));
			return -EINVAL;
		}
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;
		}
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = ib_chunk->kdata[idx] & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fall through to the forbidden-register error
		 * (do not move this case) */
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	unsigned i, c;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++] & 0x1F;
		track->num_arrays = c;
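		/* arrays come in pairs: each loop iteration consumes
		 * 3 dwords (the packed element sizes for two arrays,
		 * then two buffer pointers that both need relocation);
		 * an odd array count leaves one final pointer, handled
		 * after the loop */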
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 1].robj = reloc->robj;
			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
			track->arrays[i + 1].esize &= 0x7F;
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	/* Draw packet */
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be 3: vertex data is embedded in the
		 * command stream */
		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be 3: vertex data is embedded in the
		 * command stream */
		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL) {
		return -ENOMEM;
	}
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
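		/* advance past the 1-dword packet header plus payload;
		 * count encodes the payload length minus one (assumed
		 * radeon packet convention), hence count + 2 */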
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}
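
/* reg_safe_bm is a generated bitmap (see r300_reg_safe.h) telling the
 * packet0 parser which register writes may pass through as-is and which
 * must be routed to r300_packet0_check() for relocation and validation */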
void r300_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

int r300_init(struct radeon_device *rdev)
{
	r300_set_reg_safe(rdev);
	return 0;
}

void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}

1299
1300 /* Stops all mc clients */
1301 r100_mc_stop(rdev, &save);
1302 /* Shutdown PCI/PCIE GART */
1303 radeon_gart_disable(rdev);
1304 if (rdev->flags & RADEON_IS_AGP) {
1305 WREG32(R_00014C_MC_AGP_LOCATION,
1306 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
1307 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
1308 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
1309 WREG32(R_00015C_AGP_BASE_2,
1310 upper_32_bits(rdev->mc.agp_base) & 0xff);
1311 } else {
1312 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
1313 WREG32(R_000170_AGP_BASE, 0);
1314 WREG32(R_00015C_AGP_BASE_2, 0);
1315 }
1316 /* Wait for mc idle */
1317 if (r300_mc_wait_for_idle(rdev))
1318 DRM_INFO("Failed to wait MC idle before programming MC.\n");
1319 /* Program MC, should be a 32bits limited address space */
1320 WREG32(R_000148_MC_FB_LOCATION,
1321 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
1322 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
1323 r100_mc_resume(rdev, &save);
1324}