Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- |
2 | */ | |
3 | /************************************************************************** | |
4 | * | |
5 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | |
6 | * All Rights Reserved. | |
7 | * | |
8 | **************************************************************************/ | |
9 | ||
10 | #include "drmP.h" | |
11 | #include "drm.h" | |
12 | #include "i915_drm.h" | |
13 | #include "i915_drv.h" | |
14 | ||
/* Ioctl dispatch table, indexed by ioctl number.  The two trailing flags
 * appear to be {auth_needed, root_only} per drm_ioctl_desc_t -- NOTE(review):
 * confirm flag semantics against drmP.h; init/setparam/init_heap are the
 * only privileged entries.
 */
drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, 1, 1},
	[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, 1, 0},
	[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, 1, 0},
	[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, 1, 0},
	[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, 1, 0},
	[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, 1, 0},
	[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, 1, 0},
	[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, 1, 1},
	[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, 1, 0},
	[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, 1, 0},
	[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, 1, 1},
	[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, 1, 0}
};

/* Number of entries in i915_ioctls, exported to the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
31 | ||
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/* Poll until at least n bytes are free in the low-priority ring.
 * The poll counter restarts whenever the hardware head pointer moves,
 * so EBUSY is returned only after 10000 polls with no progress at all.
 * (The caller argument is currently unused.)
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* Free space is head-to-tail distance minus an 8-byte pad
		 * so the tail never catches the head exactly. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Record the stall for the SAREA performance-box overlay. */
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		/* Head advanced: the ring is draining, reset the timeout. */
		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
	}

	return DRM_ERR(EBUSY);
}
62 | ||
/* Resynchronize the driver's ring-buffer bookkeeping (head, tail, space)
 * with the hardware registers, e.g. after another client may have used
 * the ring.  Also flags an empty ring in the SAREA perf boxes.
 */
void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	/* Same space computation as i915_wait_ring(): 8-byte safety pad. */
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
77 | ||
/* Tear down all DMA state: disable interrupts, unmap the ring buffer,
 * free the hardware status page, and release dev_private.  Each resource
 * is checked before release, so this is safe to call on a partially
 * initialized device (the i915_initialize() error paths rely on that).
 */
int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall (dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree( &dev_priv->ring.map, dev);
		}

		if (dev_priv->hw_status_page) {
			drm_pci_free(dev, PAGE_SIZE, dev_priv->hw_status_page,
				     dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		drm_free (dev->dev_private, sizeof(drm_i915_private_t),
			  DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}
110 | ||
/* One-time DMA setup driven by the I915_INIT_DMA ioctl: locate the SAREA
 * and MMIO maps, ioremap the ring buffer, and allocate + program the
 * hardware status page.  On any failure dev->dev_private is published
 * first so i915_dma_cleanup() can free whatever was set up so far.
 */
static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		/* Publish dev_priv so cleanup frees it. */
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	/* tail_mask assumes ring_size is a power of two -- TODO confirm
	 * this is validated by the caller/userspace. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap( &dev_priv->ring.map, dev );

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
						 0xffffffff,
						 &dev_priv->dma_status_page);

	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Point the hardware status page register at the DMA address. */
	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	dev->dev_private = (void *)dev_priv;

	return 0;
}
194 | ||
/* Re-enable DMA after a suspend/reset: all mappings created by
 * i915_initialize() must still be present; only the hardware status page
 * register is reprogrammed here.
 */
static int i915_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
229 | ||
230 | int i915_dma_init(DRM_IOCTL_ARGS) | |
231 | { | |
232 | DRM_DEVICE; | |
233 | drm_i915_private_t *dev_priv; | |
234 | drm_i915_init_t init; | |
235 | int retcode = 0; | |
236 | ||
237 | DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data, | |
238 | sizeof(init)); | |
239 | ||
240 | switch (init.func) { | |
241 | case I915_INIT_DMA: | |
242 | dev_priv = drm_alloc (sizeof(drm_i915_private_t), | |
243 | DRM_MEM_DRIVER); | |
244 | if (dev_priv == NULL) | |
245 | return DRM_ERR(ENOMEM); | |
246 | retcode = i915_initialize(dev, dev_priv, &init); | |
247 | break; | |
248 | case I915_CLEANUP_DMA: | |
249 | retcode = i915_dma_cleanup(dev); | |
250 | break; | |
251 | case I915_RESUME_DMA: | |
252 | retcode = i915_resume(dev); | |
253 | break; | |
254 | default: | |
255 | retcode = -EINVAL; | |
256 | break; | |
257 | } | |
258 | ||
259 | return retcode; | |
260 | } | |
261 | ||
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Returns the dword length of the command packet starting with 'cmd',
 * which tells the caller where the next command begins.  A return of
 * zero marks an illegal command and aborts validation of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	int client = (cmd >> 29) & 0x7;
	int opcode;

	if (client == 0x0) {
		/* MI client: only MI_NOOP (0x0) and MI_FLUSH (0x4) pass. */
		opcode = (cmd >> 23) & 0x3f;
		return (opcode == 0x0 || opcode == 0x4) ? 1 : 0;
	}

	if (client == 0x2)
		return (cmd & 0xff) + 2;	/* 2d commands */

	if (client != 0x3)
		return 0;	/* 0x1 is reserved; 0x4..0x7 disallowed */

	/* 3D client. */
	opcode = (cmd >> 24) & 0x1f;
	if (opcode <= 0x18)
		return 1;

	switch (opcode) {
	case 0x1c:
		return 1;
	case 0x1d:
		switch ((cmd >> 16) & 0xff) {
		case 0x3:
			return (cmd & 0x1f) + 2;
		case 0x4:
			return (cmd & 0xf) + 2;
		default:
			return (cmd & 0xffff) + 2;
		}
	case 0x1e:
		return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
	case 0x1f:
		if ((cmd & (1 << 23)) == 0)	/* inline vertices */
			return (cmd & 0x1ffff) + 2;
		if (cmd & (1 << 17)) {		/* indirect random */
			if ((cmd & 0xffff) == 0)
				return 0;	/* unknown length, too hard */
			return (((cmd & 0xffff) + 1) / 2) + 1;
		}
		return 2;			/* indirect sequential */
	default:
		return 0;	/* opcodes 0x19..0x1b: disallowed */
	}
}
328 | ||
/* Thin wrapper around do_validate_cmd(), kept as a single place to hang
 * the (disabled) debug trace below.
 */
static int validate_cmd(int cmd)
{
	/* printk("validate_cmd( %x ): %d\n", cmd, do_validate_cmd(cmd)); */
	return do_validate_cmd(cmd);
}
337 | ||
/* Validate a user-space command buffer and copy it into the ring, one
 * command packet at a time.  Packet lengths come from validate_cmd(); a
 * zero length (illegal command) or a packet running past the end of the
 * buffer aborts with EINVAL.  The copies are unchecked -- the caller is
 * responsible for any access_ok-style verification of 'buffer'.
 */
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

		/* printk("%d/%d ", i, dwords); */

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		BEGIN_LP_RING(sz);
		OUT_RING(cmd);

		/* Emit the remaining sz-1 dwords of this packet; the comma
		 * expression advances i before testing --sz. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
		ADVANCE_LP_RING();
	}

	return 0;
}
370 | ||
371 | static int i915_emit_box(drm_device_t * dev, | |
372 | drm_clip_rect_t __user * boxes, | |
373 | int i, int DR1, int DR4) | |
374 | { | |
375 | drm_i915_private_t *dev_priv = dev->dev_private; | |
376 | drm_clip_rect_t box; | |
377 | RING_LOCALS; | |
378 | ||
379 | if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { | |
380 | return EFAULT; | |
381 | } | |
382 | ||
383 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { | |
384 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | |
385 | box.x1, box.y1, box.x2, box.y2); | |
386 | return DRM_ERR(EINVAL); | |
387 | } | |
388 | ||
389 | BEGIN_LP_RING(6); | |
390 | OUT_RING(GFX_OP_DRAWRECT_INFO); | |
391 | OUT_RING(DR1); | |
392 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | |
393 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); | |
394 | OUT_RING(DR4); | |
395 | OUT_RING(0); | |
396 | ADVANCE_LP_RING(); | |
397 | ||
398 | return 0; | |
399 | } | |
400 | ||
/* Run a user command buffer once per cliprect (or once if there are
 * none), emitting the drawing rectangle before each pass.
 */
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	/* Buffers must be a whole number of dwords. */
	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	return 0;
}
431 | ||
/* Execute a user batch buffer once per cliprect, via either
 * MI_BATCH_BUFFER_START or the older explicit-range MI_BATCH_BUFFER
 * form, then store the updated enqueue counter in the hardware status
 * page.  The batch runs non-secure in both forms.
 */
static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	/* Batch start address and length must be 8-byte aligned. */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			/* Fallback form: explicit start and end addresses. */
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Store the new counter at byte offset 20 (dword 5) of the
	 * hardware status page; i915_batchbuffer() reads hw_status[5]. */
	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
484 | ||
/* Queue an asynchronous page flip between the front (page 0) and back
 * (page 1) buffers, wait for the flip event, bump the enqueue counter,
 * and record the new current page in the SAREA.
 */
static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	/* Flush caches before changing the display base address. */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* NOTE(review): reserves 6 dwords but emits only 4 -- presumably
	 * padding is harmless here; confirm against BEGIN_LP_RING. */
	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	/* Toggle which buffer is scanned out. */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall the ring until the flip actually happens. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Store the new counter at dword 5 of the hardware status page. */
	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
532 | ||
/* Wait for the hardware to go idle: the ring is drained when all but
 * the 8-byte safety pad is free.
 */
static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}
540 | ||
/* DRM_IOCTL_I915_FLUSH: block until the ring has drained completely.
 * Requires the caller to hold the hardware lock.
 */
int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_quiescent(dev);
}
549 | ||
550 | int i915_batchbuffer(DRM_IOCTL_ARGS) | |
551 | { | |
552 | DRM_DEVICE; | |
553 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
554 | u32 *hw_status = dev_priv->hw_status_page; | |
555 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | |
556 | dev_priv->sarea_priv; | |
557 | drm_i915_batchbuffer_t batch; | |
558 | int ret; | |
559 | ||
560 | if (!dev_priv->allow_batchbuffer) { | |
561 | DRM_ERROR("Batchbuffer ioctl disabled\n"); | |
562 | return DRM_ERR(EINVAL); | |
563 | } | |
564 | ||
565 | DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data, | |
566 | sizeof(batch)); | |
567 | ||
568 | DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", | |
569 | batch.start, batch.used, batch.num_cliprects); | |
570 | ||
571 | LOCK_TEST_WITH_RETURN(dev, filp); | |
572 | ||
573 | if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, | |
574 | batch.num_cliprects * | |
575 | sizeof(drm_clip_rect_t))) | |
576 | return DRM_ERR(EFAULT); | |
577 | ||
578 | ret = i915_dispatch_batchbuffer(dev, &batch); | |
579 | ||
580 | sarea_priv->last_dispatch = (int)hw_status[5]; | |
581 | return ret; | |
582 | } | |
583 | ||
584 | int i915_cmdbuffer(DRM_IOCTL_ARGS) | |
585 | { | |
586 | DRM_DEVICE; | |
587 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
588 | u32 *hw_status = dev_priv->hw_status_page; | |
589 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | |
590 | dev_priv->sarea_priv; | |
591 | drm_i915_cmdbuffer_t cmdbuf; | |
592 | int ret; | |
593 | ||
594 | DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data, | |
595 | sizeof(cmdbuf)); | |
596 | ||
597 | DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", | |
598 | cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects); | |
599 | ||
600 | LOCK_TEST_WITH_RETURN(dev, filp); | |
601 | ||
602 | if (cmdbuf.num_cliprects && | |
603 | DRM_VERIFYAREA_READ(cmdbuf.cliprects, | |
604 | cmdbuf.num_cliprects * | |
605 | sizeof(drm_clip_rect_t))) { | |
606 | DRM_ERROR("Fault accessing cliprects\n"); | |
607 | return DRM_ERR(EFAULT); | |
608 | } | |
609 | ||
610 | ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); | |
611 | if (ret) { | |
612 | DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); | |
613 | return ret; | |
614 | } | |
615 | ||
616 | sarea_priv->last_dispatch = (int)hw_status[5]; | |
617 | return 0; | |
618 | } | |
619 | ||
/* If a page flip left the back buffer (page 1) displayed, flip once more
 * so the front buffer (page 0) is showing again before teardown.
 * NOTE(review): the i915_dispatch_flip() return value is ignored here.
 */
int i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	if (dev_priv->current_page != 0)
		i915_dispatch_flip(dev);

	return 0;
}
630 | ||
/* DRM_IOCTL_I915_FLIP: flip the displayed buffer between front and back.
 * Requires the caller to hold the hardware lock.
 */
int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_dispatch_flip(dev);
}
641 | ||
642 | int i915_getparam(DRM_IOCTL_ARGS) | |
643 | { | |
644 | DRM_DEVICE; | |
645 | drm_i915_private_t *dev_priv = dev->dev_private; | |
646 | drm_i915_getparam_t param; | |
647 | int value; | |
648 | ||
649 | if (!dev_priv) { | |
650 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | |
651 | return DRM_ERR(EINVAL); | |
652 | } | |
653 | ||
654 | DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, | |
655 | sizeof(param)); | |
656 | ||
657 | switch (param.param) { | |
658 | case I915_PARAM_IRQ_ACTIVE: | |
659 | value = dev->irq ? 1 : 0; | |
660 | break; | |
661 | case I915_PARAM_ALLOW_BATCHBUFFER: | |
662 | value = dev_priv->allow_batchbuffer ? 1 : 0; | |
663 | break; | |
664 | default: | |
665 | DRM_ERROR("Unkown parameter %d\n", param.param); | |
666 | return DRM_ERR(EINVAL); | |
667 | } | |
668 | ||
669 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { | |
670 | DRM_ERROR("DRM_COPY_TO_USER failed\n"); | |
671 | return DRM_ERR(EFAULT); | |
672 | } | |
673 | ||
674 | return 0; | |
675 | } | |
676 | ||
/* DRM_IOCTL_I915_SETPARAM: set one of the tunable driver parameters
 * (batchbuffer start method, texture LRU granularity, or whether the
 * batchbuffer ioctl is allowed at all).
 */
int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}
708 | ||
/* Driver hook run before the DRM core tears the device down: release the
 * AGP memory heap, then run full DMA cleanup (which also frees
 * dev_private).
 */
void i915_driver_pretakedown(drm_device_t *dev)
{
	if ( dev->dev_private ) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown( &(dev_priv->agp_heap) );
	}
	i915_dma_cleanup( dev );
}
717 | ||
/* Per-file-handle hook run before a client releases the device: free any
 * AGP heap regions still owned by that client (filp).
 */
void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp)
{
	if ( dev->dev_private ) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_release( dev, filp, dev_priv->agp_heap );
	}
}
725 |