i915: add new pciids for 945GME, 965GME/GLE
drivers/char/drm/i915_dma.c

/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
		       dev->pci_device == 0x2982 || \
		       dev->pci_device == 0x2992 || \
		       dev->pci_device == 0x29A2 || \
		       dev->pci_device == 0x2A02 || \
		       dev->pci_device == 0x2A12)

/* Really want an OS-independent resettable timer. Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
	}

	return DRM_ERR(EBUSY);
}

void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		if (dev_priv->status_page_dmah) {
			drm_pci_free(dev, dev_priv->status_page_dmah);
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i915_private_t),
			 DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}

static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
						   0xffffffff);

	if (!dev_priv->status_page_dmah) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	dev->dev_private = (void *)dev_priv;

	return 0;
}

static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		dev_priv = drm_alloc(sizeof(drm_i915_private_t),
				     DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
		retcode = i915_initialize(dev, dev_priv, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = DRM_ERR(EINVAL);
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction. It's important to get the size right as
 * that tells us where the next instruction to check is. Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return DRM_ERR(EINVAL);

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

static int i915_emit_box(drm_device_t * dev,
			 drm_clip_rect_t __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return DRM_ERR(EFAULT);
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return DRM_ERR(EINVAL);
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}

static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}

static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_quiescent(dev);
}

static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
				 sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	ret = i915_dispatch_batchbuffer(dev, &batch);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}

static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}

static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_dispatch_flip(dev);
}

static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}

static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}

int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}

void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	i915_dma_cleanup(dev);
}

void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_release(dev, filp, dev_priv->agp_heap);
	}
}

drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH },
	[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}