/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/swap.h>

static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
			   uint32_t read_domains,
			   uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
				 uint64_t offset,
				 uint64_t size,
				 uint32_t read_domains,
				 uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
		    struct drm_file *file_priv,
		    uint32_t read_domains,
		    uint32_t write_domain);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	mutex_lock(&dev->struct_mutex);

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
		    args->gtt_end - args->gtt_start);

	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int handle, ret;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
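/*
 * Illustrative user-space sketch (not driver code) of how the create ioctl
 * above is typically driven.  It assumes a DRM fd opened on the i915 device
 * and the DRM_IOCTL_I915_GEM_CREATE definition from i915_drm.h; error
 * handling is reduced to the bare minimum.  Kept under #if 0 since user-space
 * headers cannot be used in the kernel build.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int example_gem_create(int drm_fd, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = size;		/* rounded up to PAGE_SIZE by the kernel */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return -1;

	*handle = create.handle;	/* per-fd handle naming the new object */
	return 0;
}
#endif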
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	ssize_t read;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
					       I915_GEM_DOMAIN_CPU, 0);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
			args->size, &offset);
	if (read != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		if (read < 0)
			return read;
		else
			return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		    struct drm_i915_gem_pwrite *args,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	char __iomem *vaddr;
	char *vaddr_atomic;
	int i, o, l;
	int ret = 0;
	unsigned long pfn;
	unsigned long unwritten;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_set_domain(obj, file_priv,
				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * i = page number
		 * o = offset within page
		 * l = bytes to copy
		 */
		i = offset >> PAGE_SHIFT;
		o = offset & (PAGE_SIZE-1);
		l = remain;
		if ((o + l) > PAGE_SIZE)
			l = PAGE_SIZE - o;

		pfn = (dev->agp->base >> PAGE_SHIFT) + i;

#ifdef CONFIG_HIGHMEM
		/* This is a workaround for the low performance of iounmap
		 * (approximate 10% cpu cost on normal 3D workloads).
		 * kmap_atomic on HIGHMEM kernels happens to let us map card
		 * memory without taking IPIs.  When the vmap rework lands
		 * we should be able to dump this hack.
		 */
		vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
		DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
			 i, o, l, pfn, vaddr_atomic);
		unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
							      user_data, l);
		kunmap_atomic(vaddr_atomic, KM_USER0);

		if (unwritten)
#endif /* CONFIG_HIGHMEM */
		{
			vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
			DRM_INFO("pwrite slow i %d o %d l %d "
				 "pfn %ld vaddr %p\n",
				 i, o, l, pfn, vaddr);
			if (vaddr == NULL) {
				ret = -EFAULT;
				goto fail;
			}
			unwritten = __copy_from_user(vaddr + o, user_data, l);
			DRM_INFO("unwritten %ld\n", unwritten);
			iounmap(vaddr);
			if (unwritten) {
				ret = -EFAULT;
				goto fail;
			}
		}

		remain -= l;
		user_data += l;
		offset += l;
	}

#if WATCH_PWRITE && 1
	i915_gem_clflush_object(obj);
	i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
	i915_gem_clflush_object(obj);
#endif

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
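/*
 * Worked example of the page-splitting arithmetic used in the copy loop
 * above (illustrative only, no new code).  With 4 KiB pages and a
 * GTT-relative offset of 0x12345 for an 0x2000-byte write:
 *
 *   pass 1: i = 0x12345 >> 12 = 0x12,  o = 0x345,  l = 0x1000 - 0x345 = 0xcbb
 *   pass 2: i = 0x13,                  o = 0,      l = 0x1000
 *   pass 3: i = 0x14,                  o = 0,      l = 0x345 (the remainder)
 *
 * Each iteration therefore touches exactly one GTT page, which is what lets
 * the kmap_atomic_pfn()/ioremap_wc() mappings above cover a single page at
 * a time.
 */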
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file_priv)
{
	int ret;
	loff_t offset;
	ssize_t written;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_set_domain(obj, file_priv,
				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	written = vfs_write(obj->filp,
			    (char __user *)(uintptr_t) args->data_ptr,
			    args->size, &offset);
	if (written != args->size) {
		mutex_unlock(&dev->struct_mutex);
		if (written < 0)
			return written;
		else
			return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->tiling_mode == I915_TILING_NONE &&
	    dev->gtt_total != 0)
		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
	else
		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);

	drm_gem_object_unreference(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
		 obj, obj->size, args->read_domains, args->write_domain);
	ret = i915_gem_set_domain(obj, file_priv,
				  args->read_domains, args->write_domain);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	DRM_INFO("%s: sw_finish %d (%p %d)\n",
		 __func__, args->handle, obj, obj->size);
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);
	}
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
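/*
 * Illustrative user-space sketch (not driver code) of the mmap ioctl above:
 * the kernel mmaps the object's shmem file and hands the CPU address back in
 * addr_ptr.  As noted in the comment above, the resulting mapping pins the
 * object's contents but does not hold a reference on the object itself.
 * Assumes DRM_IOCTL_I915_GEM_MMAP from i915_drm.h and the same headers as
 * the earlier create sketch.
 */
#if 0
static void *example_gem_mmap(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap mmap_arg;

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = handle;
	mmap_arg.offset = 0;		/* map from the start of the object */
	mmap_arg.size = size;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) != 0)
		return NULL;

	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}
#endif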
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL) {
			set_page_dirty(obj_priv->page_list[i]);
			mark_page_accessed(obj_priv->page_list[i]);
			page_cache_release(obj_priv->page_list[i]);
		}

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	request->flush_domains = flush_domains;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	if (was_empty)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
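/*
 * Illustrative sketch of the typical caller pattern for the helper above
 * (condensed from i915_gem_object_wait_rendering() and i915_gem_execbuffer()
 * later in this file, not a new code path): flush the GPU write domain, emit
 * a request behind the flush, and remember the returned seqno on every object
 * the flush covered so it can be waited on later.
 */
#if 0
static void example_flush_and_track(struct drm_device *dev,
				    struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t write_domain = obj->write_domain;

	i915_gem_flush(dev, 0, write_domain);
	i915_gem_object_move_to_active(obj);
	obj_priv->last_rendering_seqno = i915_add_request(dev, write_domain);

	/* ... later, before touching the pages with the CPU ... */
	i915_wait_request(dev, obj_priv->last_rendering_seqno);
}
#endif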
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			break;

		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);

		if (obj->write_domain != 0) {
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.flushing_list);
		} else {
			i915_gem_object_move_to_inactive(obj);
		}
	}

	if (request->flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		/* Clear the write domain and activity from any buffers
		 * that are just waiting for a flush matching the one retired.
		 */
		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if (obj->write_domain & request->flush_domains) {
				obj->write_domain = 0;
				i915_gem_object_move_to_inactive(obj);
			}
		}
	}
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
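/*
 * Worked example of the wraparound-safe comparison above (illustrative only):
 * because the difference is evaluated as a signed 32-bit value, sequence
 * numbers that have wrapped past 0xffffffff still compare as "later" provided
 * the two values are within 2^31 of each other:
 *
 *   i915_seqno_passed(0x00000003, 0xfffffffe)
 *     -> (int32_t)(0x00000003 - 0xfffffffe) = (int32_t)0x00000005 =  5 >= 0
 *
 *   i915_seqno_passed(0xfffffffe, 0x00000003)
 *     -> (int32_t)(0xfffffffe - 0x00000003) = (int32_t)0xfffffffb = -5 <  0
 *
 * Note that i915_add_request() above skips 0 not because of this comparison
 * but because 0 is reserved as the "no seqno" value.
 */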
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);

		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* If there are writes queued to the buffer, flush and
	 * create a new seqno to wait for.
	 */
	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
		uint32_t write_domain = obj->write_domain;

		DRM_INFO("%s: flushing object %p from write domain %08x\n",
			 __func__, obj, write_domain);

		i915_gem_flush(dev, 0, write_domain);

		i915_gem_object_move_to_active(obj);
		obj_priv->last_rendering_seqno = i915_add_request(dev,
								  write_domain);
		BUG_ON(obj_priv->last_rendering_seqno == 0);

		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
	}
	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);

	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Wait for any rendering to complete
	 */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret) {
		DRM_ERROR("wait_rendering failed: %d\n", ret);
		return ret;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
					 I915_GEM_DOMAIN_CPU);
	if (ret) {
		DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	i915_gem_object_free_page_list(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);

			DRM_INFO("%s: evicting %p\n", __func__, obj);

			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}

	return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_free_page_list(obj);
			return ret;
		}
		obj_priv->page_list[i] = page;
	}
	return 0;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (alignment == 0)
		alignment = PAGE_SIZE;
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);

	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}
static void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/**
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped to GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj), then
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	3. Read by GPU
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj), MI_FLUSH and
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj), MI_FLUSH and
 *		drm_agp_chipset_flush
 */
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
			   uint32_t read_domains,
			   uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;
	int ret;

	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, read_domains,
		 obj->write_domain, write_domain);
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
		/*
		 * If we're invalidating the CPU cache and flushing a GPU cache,
		 * then pause for rendering so that the GPU caches will be
		 * flushed before the cpu cache is invalidated
		 */
		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
				       I915_GEM_DOMAIN_GTT))) {
			ret = i915_gem_object_wait_rendering(obj);
			if (ret)
				return ret;
		}
		i915_gem_clflush_object(obj);
	}

	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;

	/* If we're invalidating the CPU domain, clear the per-page CPU
	 * domain list as well.
	 */
	if (obj_priv->page_cpu_valid != NULL &&
	    (write_domain != 0 ||
	     read_domains & I915_GEM_DOMAIN_CPU)) {
		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
			 DRM_MEM_DRIVER);
		obj_priv->page_cpu_valid = NULL;
	}
	obj->read_domains = read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;

	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);

	return 0;
}
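/*
 * Illustrative walk-through (not driver code) of the core bookkeeping in
 * i915_gem_object_set_domain() above, following "Case 1: the batch buffer"
 * from the big comment: the object currently has (read_domains, write_domain)
 * of (CPU, CPU) and execbuffer asks for (COMMAND, 0).
 */
#if 0
static void example_domain_transition(void)
{
	uint32_t obj_read = I915_GEM_DOMAIN_CPU;	/* current state */
	uint32_t obj_write = I915_GEM_DOMAIN_CPU;
	uint32_t read_domains = I915_GEM_DOMAIN_COMMAND; /* requested */
	uint32_t write_domain = 0;
	uint32_t flush_domains = 0, invalidate_domains = 0;

	/* No new write domain, so the old read domains are kept. */
	if (write_domain == 0)
		read_domains |= obj_read;		/* CPU | COMMAND */

	/* Old write domain (CPU) differs from the new readers: flush it. */
	if (obj_write && obj_write != read_domains)
		flush_domains |= obj_write;		/* CPU */

	/* Any brand-new readers need their caches invalidated. */
	invalidate_domains |= read_domains & ~obj_read;	/* COMMAND */

	/* flush_domains == CPU and invalidate_domains == COMMAND, matching
	 * the walk-through in the comment: clflush the object, then let
	 * i915_gem_dev_set_domain() emit the MI_FLUSH/chipset flush.
	 */
}
#endif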
/**
 * Set the read/write domain on a range of the object.
 *
 * Currently only implemented for CPU reads, otherwise drops to normal
 * i915_gem_object_set_domain().
 */
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
				 uint64_t offset,
				 uint64_t size,
				 uint32_t read_domains,
				 uint32_t write_domain)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret, i;

	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
		return 0;

	if (read_domains != I915_GEM_DOMAIN_CPU ||
	    write_domain != 0)
		return i915_gem_object_set_domain(obj,
						  read_domains, write_domain);

	/* Wait on any GPU rendering to the object to be flushed. */
	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
		ret = i915_gem_object_wait_rendering(obj);
		if (ret)
			return ret;
	}

	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	}

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->page_list + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	return 0;
}
/**
 * Once all of the objects have been set in the proper domain,
 * perform the necessary flush and invalidate operations.
 *
 * Returns the write domains flushed, for use in flush tracking.
 */
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
	uint32_t flush_domains = dev->flush_domains;

	/*
	 * Now that all the buffers are synced to the proper domains,
	 * flush and invalidate the collected domains
	 */
	if (dev->invalidate_domains | dev->flush_domains) {
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		dev->invalidate_domains = 0;
		dev->flush_domains = 0;
	}

	return flush_domains;
}
1479 * Pin an object to the GTT and evaluate the relocations landing in it.
1482 i915_gem_object_pin_and_relocate(struct drm_gem_object
*obj
,
1483 struct drm_file
*file_priv
,
1484 struct drm_i915_gem_exec_object
*entry
)
1486 struct drm_device
*dev
= obj
->dev
;
1487 struct drm_i915_gem_relocation_entry reloc
;
1488 struct drm_i915_gem_relocation_entry __user
*relocs
;
1489 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1491 uint32_t last_reloc_offset
= -1;
1492 void __iomem
*reloc_page
= NULL
;
1494 /* Choose the GTT offset for our buffer and put it there. */
1495 ret
= i915_gem_object_pin(obj
, (uint32_t) entry
->alignment
);
1499 entry
->offset
= obj_priv
->gtt_offset
;
1501 relocs
= (struct drm_i915_gem_relocation_entry __user
*)
1502 (uintptr_t) entry
->relocs_ptr
;
1503 /* Apply the relocations, using the GTT aperture to avoid cache
1504 * flushing requirements.
1506 for (i
= 0; i
< entry
->relocation_count
; i
++) {
1507 struct drm_gem_object
*target_obj
;
1508 struct drm_i915_gem_object
*target_obj_priv
;
1509 uint32_t reloc_val
, reloc_offset
;
1510 uint32_t __iomem
*reloc_entry
;
1512 ret
= copy_from_user(&reloc
, relocs
+ i
, sizeof(reloc
));
1514 i915_gem_object_unpin(obj
);
1518 target_obj
= drm_gem_object_lookup(obj
->dev
, file_priv
,
1519 reloc
.target_handle
);
1520 if (target_obj
== NULL
) {
1521 i915_gem_object_unpin(obj
);
1524 target_obj_priv
= target_obj
->driver_private
;
1526 /* The target buffer should have appeared before us in the
1527 * exec_object list, so it should have a GTT space bound by now.
1529 if (target_obj_priv
->gtt_space
== NULL
) {
1530 DRM_ERROR("No GTT space found for object %d\n",
1531 reloc
.target_handle
);
1532 drm_gem_object_unreference(target_obj
);
1533 i915_gem_object_unpin(obj
);
1537 if (reloc
.offset
> obj
->size
- 4) {
1538 DRM_ERROR("Relocation beyond object bounds: "
1539 "obj %p target %d offset %d size %d.\n",
1540 obj
, reloc
.target_handle
,
1541 (int) reloc
.offset
, (int) obj
->size
);
1542 drm_gem_object_unreference(target_obj
);
1543 i915_gem_object_unpin(obj
);
1546 if (reloc
.offset
& 3) {
1547 DRM_ERROR("Relocation not 4-byte aligned: "
1548 "obj %p target %d offset %d.\n",
1549 obj
, reloc
.target_handle
,
1550 (int) reloc
.offset
);
1551 drm_gem_object_unreference(target_obj
);
1552 i915_gem_object_unpin(obj
);
1556 if (reloc
.write_domain
&& target_obj
->pending_write_domain
&&
1557 reloc
.write_domain
!= target_obj
->pending_write_domain
) {
1558 DRM_ERROR("Write domain conflict: "
1559 "obj %p target %d offset %d "
1560 "new %08x old %08x\n",
1561 obj
, reloc
.target_handle
,
1564 target_obj
->pending_write_domain
);
1565 drm_gem_object_unreference(target_obj
);
1566 i915_gem_object_unpin(obj
);
1571 DRM_INFO("%s: obj %p offset %08x target %d "
1572 "read %08x write %08x gtt %08x "
1573 "presumed %08x delta %08x\n",
1577 (int) reloc
.target_handle
,
1578 (int) reloc
.read_domains
,
1579 (int) reloc
.write_domain
,
1580 (int) target_obj_priv
->gtt_offset
,
1581 (int) reloc
.presumed_offset
,
1585 target_obj
->pending_read_domains
|= reloc
.read_domains
;
1586 target_obj
->pending_write_domain
|= reloc
.write_domain
;
1588 /* If the relocation already has the right value in it, no
1589 * more work needs to be done.
1591 if (target_obj_priv
->gtt_offset
== reloc
.presumed_offset
) {
1592 drm_gem_object_unreference(target_obj
);
1596 /* Now that we're going to actually write some data in,
1597 * make sure that any rendering using this buffer's contents
1600 i915_gem_object_wait_rendering(obj
);
1602 /* As we're writing through the gtt, flush
1603 * any CPU writes before we write the relocations
1605 if (obj
->write_domain
& I915_GEM_DOMAIN_CPU
) {
1606 i915_gem_clflush_object(obj
);
1607 drm_agp_chipset_flush(dev
);
1608 obj
->write_domain
= 0;
1611 /* Map the page containing the relocation we're going to
1614 reloc_offset
= obj_priv
->gtt_offset
+ reloc
.offset
;
1615 if (reloc_page
== NULL
||
1616 (last_reloc_offset
& ~(PAGE_SIZE
- 1)) !=
1617 (reloc_offset
& ~(PAGE_SIZE
- 1))) {
1618 if (reloc_page
!= NULL
)
1619 iounmap(reloc_page
);
1621 reloc_page
= ioremap_wc(dev
->agp
->base
+
1625 last_reloc_offset
= reloc_offset
;
1626 if (reloc_page
== NULL
) {
1627 drm_gem_object_unreference(target_obj
);
1628 i915_gem_object_unpin(obj
);
1633 reloc_entry
= (uint32_t __iomem
*)(reloc_page
+
1634 (reloc_offset
& (PAGE_SIZE
- 1)));
1635 reloc_val
= target_obj_priv
->gtt_offset
+ reloc
.delta
;
1638 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1639 obj
, (unsigned int) reloc
.offset
,
1640 readl(reloc_entry
), reloc_val
);
1642 writel(reloc_val
, reloc_entry
);
1644 /* Write the updated presumed offset for this entry back out
1647 reloc
.presumed_offset
= target_obj_priv
->gtt_offset
;
1648 ret
= copy_to_user(relocs
+ i
, &reloc
, sizeof(reloc
));
1650 drm_gem_object_unreference(target_obj
);
1651 i915_gem_object_unpin(obj
);
1655 drm_gem_object_unreference(target_obj
);
1658 if (reloc_page
!= NULL
)
1659 iounmap(reloc_page
);
1663 i915_gem_dump_object(obj
, 128, __func__
, ~0);
1668 /** Dispatch a batchbuffer to the ring
1671 i915_dispatch_gem_execbuffer(struct drm_device
*dev
,
1672 struct drm_i915_gem_execbuffer
*exec
,
1673 uint64_t exec_offset
)
1675 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1676 struct drm_clip_rect __user
*boxes
= (struct drm_clip_rect __user
*)
1677 (uintptr_t) exec
->cliprects_ptr
;
1678 int nbox
= exec
->num_cliprects
;
1680 uint32_t exec_start
, exec_len
;
1683 exec_start
= (uint32_t) exec_offset
+ exec
->batch_start_offset
;
1684 exec_len
= (uint32_t) exec
->batch_len
;
1686 if ((exec_start
| exec_len
) & 0x7) {
1687 DRM_ERROR("alignment\n");
1694 count
= nbox
? nbox
: 1;
1696 for (i
= 0; i
< count
; i
++) {
1698 int ret
= i915_emit_box(dev
, boxes
, i
,
1699 exec
->DR1
, exec
->DR4
);
1704 if (IS_I830(dev
) || IS_845G(dev
)) {
1706 OUT_RING(MI_BATCH_BUFFER
);
1707 OUT_RING(exec_start
| MI_BATCH_NON_SECURE
);
1708 OUT_RING(exec_start
+ exec_len
- 4);
1713 if (IS_I965G(dev
)) {
1714 OUT_RING(MI_BATCH_BUFFER_START
|
1716 MI_BATCH_NON_SECURE_I965
);
1717 OUT_RING(exec_start
);
1719 OUT_RING(MI_BATCH_BUFFER_START
|
1721 OUT_RING(exec_start
| MI_BATCH_NON_SECURE
);
1727 /* XXX breadcrumb */
1731 /* Throttle our rendering by waiting until the ring has completed our requests
1732 * emitted over 20 msec ago.
1734 * This should get us reasonable parallelism between CPU and GPU but also
1735 * relatively low latency when blocking on a particular request to finish.
1738 i915_gem_ring_throttle(struct drm_device
*dev
, struct drm_file
*file_priv
)
1740 struct drm_i915_file_private
*i915_file_priv
= file_priv
->driver_priv
;
1744 mutex_lock(&dev
->struct_mutex
);
1745 seqno
= i915_file_priv
->mm
.last_gem_throttle_seqno
;
1746 i915_file_priv
->mm
.last_gem_throttle_seqno
=
1747 i915_file_priv
->mm
.last_gem_seqno
;
1749 ret
= i915_wait_request(dev
, seqno
);
1750 mutex_unlock(&dev
->struct_mutex
);
1755 i915_gem_execbuffer(struct drm_device
*dev
, void *data
,
1756 struct drm_file
*file_priv
)
1758 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1759 struct drm_i915_file_private
*i915_file_priv
= file_priv
->driver_priv
;
1760 struct drm_i915_gem_execbuffer
*args
= data
;
1761 struct drm_i915_gem_exec_object
*exec_list
= NULL
;
1762 struct drm_gem_object
**object_list
= NULL
;
1763 struct drm_gem_object
*batch_obj
;
1764 int ret
, i
, pinned
= 0;
1765 uint64_t exec_offset
;
1766 uint32_t seqno
, flush_domains
;
1769 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1770 (int) args
->buffers_ptr
, args
->buffer_count
, args
->batch_len
);
1773 if (args
->buffer_count
< 1) {
1774 DRM_ERROR("execbuf with %d buffers\n", args
->buffer_count
);
1777 /* Copy in the exec list from userland */
1778 exec_list
= drm_calloc(sizeof(*exec_list
), args
->buffer_count
,
1780 object_list
= drm_calloc(sizeof(*object_list
), args
->buffer_count
,
1782 if (exec_list
== NULL
|| object_list
== NULL
) {
1783 DRM_ERROR("Failed to allocate exec or object list "
1785 args
->buffer_count
);
1789 ret
= copy_from_user(exec_list
,
1790 (struct drm_i915_relocation_entry __user
*)
1791 (uintptr_t) args
->buffers_ptr
,
1792 sizeof(*exec_list
) * args
->buffer_count
);
1794 DRM_ERROR("copy %d exec entries failed %d\n",
1795 args
->buffer_count
, ret
);
1799 mutex_lock(&dev
->struct_mutex
);
1801 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1803 if (dev_priv
->mm
.wedged
) {
1804 DRM_ERROR("Execbuf while wedged\n");
1805 mutex_unlock(&dev
->struct_mutex
);
1809 if (dev_priv
->mm
.suspended
) {
1810 DRM_ERROR("Execbuf while VT-switched.\n");
1811 mutex_unlock(&dev
->struct_mutex
);
/* Zero the global flush/invalidate flags. These
1816 * will be modified as each object is bound to the
1819 dev
->invalidate_domains
= 0;
1820 dev
->flush_domains
= 0;
1822 /* Look up object handles and perform the relocations */
1823 for (i
= 0; i
< args
->buffer_count
; i
++) {
1824 object_list
[i
] = drm_gem_object_lookup(dev
, file_priv
,
1825 exec_list
[i
].handle
);
1826 if (object_list
[i
] == NULL
) {
1827 DRM_ERROR("Invalid object handle %d at index %d\n",
1828 exec_list
[i
].handle
, i
);
1833 object_list
[i
]->pending_read_domains
= 0;
1834 object_list
[i
]->pending_write_domain
= 0;
1835 ret
= i915_gem_object_pin_and_relocate(object_list
[i
],
1839 DRM_ERROR("object bind and relocate failed %d\n", ret
);
1845 /* Set the pending read domains for the batch buffer to COMMAND */
1846 batch_obj
= object_list
[args
->buffer_count
-1];
1847 batch_obj
->pending_read_domains
= I915_GEM_DOMAIN_COMMAND
;
1848 batch_obj
->pending_write_domain
= 0;
1850 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1852 for (i
= 0; i
< args
->buffer_count
; i
++) {
1853 struct drm_gem_object
*obj
= object_list
[i
];
1854 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1856 if (obj_priv
->gtt_space
== NULL
) {
/* We evicted the buffer in the process of validating
 * our set of buffers in.  We could try to recover by
 * kicking everything out and trying again from
1866 /* make sure all previous memory operations have passed */
1867 ret
= i915_gem_object_set_domain(obj
,
1868 obj
->pending_read_domains
,
1869 obj
->pending_write_domain
);
1874 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1876 /* Flush/invalidate caches and chipset buffer */
1877 flush_domains
= i915_gem_dev_set_domain(dev
);
1879 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1882 for (i
= 0; i
< args
->buffer_count
; i
++) {
1883 i915_gem_object_check_coherency(object_list
[i
],
1884 exec_list
[i
].handle
);
1888 exec_offset
= exec_list
[args
->buffer_count
- 1].offset
;
1891 i915_gem_dump_object(object_list
[args
->buffer_count
- 1],
1897 (void)i915_add_request(dev
, flush_domains
);
1899 /* Exec the batchbuffer */
1900 ret
= i915_dispatch_gem_execbuffer(dev
, args
, exec_offset
);
1902 DRM_ERROR("dispatch failed %d\n", ret
);
1907 * Ensure that the commands in the batch buffer are
1908 * finished before the interrupt fires
1910 flush_domains
= i915_retire_commands(dev
);
1912 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1915 * Get a seqno representing the execution of the current buffer,
1916 * which we can wait on. We would like to mitigate these interrupts,
1917 * likely by only creating seqnos occasionally (so that we have
1918 * *some* interrupts representing completion of buffers that we can
1919 * wait on when trying to clear up gtt space).
1921 seqno
= i915_add_request(dev
, flush_domains
);
1923 i915_file_priv
->mm
.last_gem_seqno
= seqno
;
1924 for (i
= 0; i
< args
->buffer_count
; i
++) {
1925 struct drm_gem_object
*obj
= object_list
[i
];
1926 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1928 i915_gem_object_move_to_active(obj
);
1929 obj_priv
->last_rendering_seqno
= seqno
;
1931 DRM_INFO("%s: move to exec list %p\n", __func__
, obj
);
1935 i915_dump_lru(dev
, __func__
);
1938 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1940 /* Copy the new buffer offsets back to the user's exec list. */
1941 ret
= copy_to_user((struct drm_i915_relocation_entry __user
*)
1942 (uintptr_t) args
->buffers_ptr
,
1944 sizeof(*exec_list
) * args
->buffer_count
);
1946 DRM_ERROR("failed to copy %d exec entries "
1947 "back to user (%d)\n",
1948 args
->buffer_count
, ret
);
1950 if (object_list
!= NULL
) {
1951 for (i
= 0; i
< pinned
; i
++)
1952 i915_gem_object_unpin(object_list
[i
]);
1954 for (i
= 0; i
< args
->buffer_count
; i
++)
1955 drm_gem_object_unreference(object_list
[i
]);
1957 mutex_unlock(&dev
->struct_mutex
);
1960 drm_free(object_list
, sizeof(*object_list
) * args
->buffer_count
,
1962 drm_free(exec_list
, sizeof(*exec_list
) * args
->buffer_count
,
1969 i915_gem_object_pin(struct drm_gem_object
*obj
, uint32_t alignment
)
1971 struct drm_device
*dev
= obj
->dev
;
1972 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1975 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1976 if (obj_priv
->gtt_space
== NULL
) {
1977 ret
= i915_gem_object_bind_to_gtt(obj
, alignment
);
1979 DRM_ERROR("Failure to bind: %d", ret
);
1983 obj_priv
->pin_count
++;
1985 /* If the object is not active and not pending a flush,
1986 * remove it from the inactive list
1988 if (obj_priv
->pin_count
== 1) {
1989 atomic_inc(&dev
->pin_count
);
1990 atomic_add(obj
->size
, &dev
->pin_memory
);
1991 if (!obj_priv
->active
&&
1992 (obj
->write_domain
& ~(I915_GEM_DOMAIN_CPU
|
1993 I915_GEM_DOMAIN_GTT
)) == 0 &&
1994 !list_empty(&obj_priv
->list
))
1995 list_del_init(&obj_priv
->list
);
1997 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
2003 i915_gem_object_unpin(struct drm_gem_object
*obj
)
2005 struct drm_device
*dev
= obj
->dev
;
2006 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2007 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
2009 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
2010 obj_priv
->pin_count
--;
2011 BUG_ON(obj_priv
->pin_count
< 0);
2012 BUG_ON(obj_priv
->gtt_space
== NULL
);
2014 /* If the object is no longer pinned, and is
2015 * neither active nor being flushed, then stick it on
2018 if (obj_priv
->pin_count
== 0) {
2019 if (!obj_priv
->active
&&
2020 (obj
->write_domain
& ~(I915_GEM_DOMAIN_CPU
|
2021 I915_GEM_DOMAIN_GTT
)) == 0)
2022 list_move_tail(&obj_priv
->list
,
2023 &dev_priv
->mm
.inactive_list
);
2024 atomic_dec(&dev
->pin_count
);
2025 atomic_sub(obj
->size
, &dev
->pin_memory
);
2027 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
2031 i915_gem_pin_ioctl(struct drm_device
*dev
, void *data
,
2032 struct drm_file
*file_priv
)
2034 struct drm_i915_gem_pin
*args
= data
;
2035 struct drm_gem_object
*obj
;
2036 struct drm_i915_gem_object
*obj_priv
;
2039 mutex_lock(&dev
->struct_mutex
);
2041 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
2043 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2045 mutex_unlock(&dev
->struct_mutex
);
2048 obj_priv
= obj
->driver_private
;
2050 ret
= i915_gem_object_pin(obj
, args
->alignment
);
2052 drm_gem_object_unreference(obj
);
2053 mutex_unlock(&dev
->struct_mutex
);
2057 /* XXX - flush the CPU caches for pinned objects
2058 * as the X server doesn't manage domains yet
2060 if (obj
->write_domain
& I915_GEM_DOMAIN_CPU
) {
2061 i915_gem_clflush_object(obj
);
2062 drm_agp_chipset_flush(dev
);
2063 obj
->write_domain
= 0;
2065 args
->offset
= obj_priv
->gtt_offset
;
2066 drm_gem_object_unreference(obj
);
2067 mutex_unlock(&dev
->struct_mutex
);
2073 i915_gem_unpin_ioctl(struct drm_device
*dev
, void *data
,
2074 struct drm_file
*file_priv
)
2076 struct drm_i915_gem_pin
*args
= data
;
2077 struct drm_gem_object
*obj
;
2079 mutex_lock(&dev
->struct_mutex
);
2081 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
2083 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2085 mutex_unlock(&dev
->struct_mutex
);
2089 i915_gem_object_unpin(obj
);
2091 drm_gem_object_unreference(obj
);
2092 mutex_unlock(&dev
->struct_mutex
);
2097 i915_gem_busy_ioctl(struct drm_device
*dev
, void *data
,
2098 struct drm_file
*file_priv
)
2100 struct drm_i915_gem_busy
*args
= data
;
2101 struct drm_gem_object
*obj
;
2102 struct drm_i915_gem_object
*obj_priv
;
2104 mutex_lock(&dev
->struct_mutex
);
2105 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
2107 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2109 mutex_unlock(&dev
->struct_mutex
);
2113 obj_priv
= obj
->driver_private
;
2114 args
->busy
= obj_priv
->active
;
2116 drm_gem_object_unreference(obj
);
2117 mutex_unlock(&dev
->struct_mutex
);
2122 i915_gem_throttle_ioctl(struct drm_device
*dev
, void *data
,
2123 struct drm_file
*file_priv
)
2125 return i915_gem_ring_throttle(dev
, file_priv
);
2128 int i915_gem_init_object(struct drm_gem_object
*obj
)
2130 struct drm_i915_gem_object
*obj_priv
;
2132 obj_priv
= drm_calloc(1, sizeof(*obj_priv
), DRM_MEM_DRIVER
);
2133 if (obj_priv
== NULL
)
2137 * We've just allocated pages from the kernel,
2138 * so they've just been written by the CPU with
2139 * zeros. They'll need to be clflushed before we
2140 * use them with the GPU.
2142 obj
->write_domain
= I915_GEM_DOMAIN_CPU
;
2143 obj
->read_domains
= I915_GEM_DOMAIN_CPU
;
2145 obj
->driver_private
= obj_priv
;
2146 obj_priv
->obj
= obj
;
2147 INIT_LIST_HEAD(&obj_priv
->list
);
2151 void i915_gem_free_object(struct drm_gem_object
*obj
)
2153 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
2155 while (obj_priv
->pin_count
> 0)
2156 i915_gem_object_unpin(obj
);
2158 i915_gem_object_unbind(obj
);
2160 drm_free(obj_priv
->page_cpu_valid
, 1, DRM_MEM_DRIVER
);
2161 drm_free(obj
->driver_private
, 1, DRM_MEM_DRIVER
);
2165 i915_gem_set_domain(struct drm_gem_object
*obj
,
2166 struct drm_file
*file_priv
,
2167 uint32_t read_domains
,
2168 uint32_t write_domain
)
2170 struct drm_device
*dev
= obj
->dev
;
2172 uint32_t flush_domains
;
2174 BUG_ON(!mutex_is_locked(&dev
->struct_mutex
));
2176 ret
= i915_gem_object_set_domain(obj
, read_domains
, write_domain
);
2179 flush_domains
= i915_gem_dev_set_domain(obj
->dev
);
2181 if (flush_domains
& ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
))
2182 (void) i915_add_request(dev
, flush_domains
);
2187 /** Unbinds all objects that are on the given buffer list. */
2189 i915_gem_evict_from_list(struct drm_device
*dev
, struct list_head
*head
)
2191 struct drm_gem_object
*obj
;
2192 struct drm_i915_gem_object
*obj_priv
;
2195 while (!list_empty(head
)) {
2196 obj_priv
= list_first_entry(head
,
2197 struct drm_i915_gem_object
,
2199 obj
= obj_priv
->obj
;
2201 if (obj_priv
->pin_count
!= 0) {
2202 DRM_ERROR("Pinned object in unbind list\n");
2203 mutex_unlock(&dev
->struct_mutex
);
2207 ret
= i915_gem_object_unbind(obj
);
2209 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2211 mutex_unlock(&dev
->struct_mutex
);
2221 i915_gem_idle(struct drm_device
*dev
)
2223 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2224 uint32_t seqno
, cur_seqno
, last_seqno
;
2227 if (dev_priv
->mm
.suspended
)
2230 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2231 * We need to replace this with a semaphore, or something.
2233 dev_priv
->mm
.suspended
= 1;
2235 i915_kernel_lost_context(dev
);
2237 /* Flush the GPU along with all non-CPU write domains
2239 i915_gem_flush(dev
, ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
),
2240 ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
));
2241 seqno
= i915_add_request(dev
, ~(I915_GEM_DOMAIN_CPU
|
2242 I915_GEM_DOMAIN_GTT
));
2245 mutex_unlock(&dev
->struct_mutex
);
2249 dev_priv
->mm
.waiting_gem_seqno
= seqno
;
2253 cur_seqno
= i915_get_gem_seqno(dev
);
2254 if (i915_seqno_passed(cur_seqno
, seqno
))
2256 if (last_seqno
== cur_seqno
) {
2257 if (stuck
++ > 100) {
2258 DRM_ERROR("hardware wedged\n");
2259 dev_priv
->mm
.wedged
= 1;
2260 DRM_WAKEUP(&dev_priv
->irq_queue
);
2265 last_seqno
= cur_seqno
;
2267 dev_priv
->mm
.waiting_gem_seqno
= 0;
2269 i915_gem_retire_requests(dev
);
2271 /* Active and flushing should now be empty as we've
2272 * waited for a sequence higher than any pending execbuffer
2274 BUG_ON(!list_empty(&dev_priv
->mm
.active_list
));
2275 BUG_ON(!list_empty(&dev_priv
->mm
.flushing_list
));
2277 /* Request should now be empty as we've also waited
2278 * for the last request in the list
2280 BUG_ON(!list_empty(&dev_priv
->mm
.request_list
));
2282 /* Move all buffers out of the GTT. */
2283 ret
= i915_gem_evict_from_list(dev
, &dev_priv
->mm
.inactive_list
);
2287 BUG_ON(!list_empty(&dev_priv
->mm
.active_list
));
2288 BUG_ON(!list_empty(&dev_priv
->mm
.flushing_list
));
2289 BUG_ON(!list_empty(&dev_priv
->mm
.inactive_list
));
2290 BUG_ON(!list_empty(&dev_priv
->mm
.request_list
));
2295 i915_gem_init_hws(struct drm_device
*dev
)
2297 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2298 struct drm_gem_object
*obj
;
2299 struct drm_i915_gem_object
*obj_priv
;
2302 /* If we need a physical address for the status page, it's already
2303 * initialized at driver load time.
2305 if (!I915_NEED_GFX_HWS(dev
))
2308 obj
= drm_gem_object_alloc(dev
, 4096);
2310 DRM_ERROR("Failed to allocate status page\n");
2313 obj_priv
= obj
->driver_private
;
2315 ret
= i915_gem_object_pin(obj
, 4096);
2317 drm_gem_object_unreference(obj
);
2321 dev_priv
->status_gfx_addr
= obj_priv
->gtt_offset
;
2322 dev_priv
->hws_map
.offset
= dev
->agp
->base
+ obj_priv
->gtt_offset
;
2323 dev_priv
->hws_map
.size
= 4096;
2324 dev_priv
->hws_map
.type
= 0;
2325 dev_priv
->hws_map
.flags
= 0;
2326 dev_priv
->hws_map
.mtrr
= 0;
2328 /* Ioremapping here is the wrong thing to do. We want cached access.
2330 drm_core_ioremap_wc(&dev_priv
->hws_map
, dev
);
2331 if (dev_priv
->hws_map
.handle
== NULL
) {
2332 DRM_ERROR("Failed to map status page.\n");
2333 memset(&dev_priv
->hws_map
, 0, sizeof(dev_priv
->hws_map
));
2334 drm_gem_object_unreference(obj
);
2337 dev_priv
->hws_obj
= obj
;
2338 dev_priv
->hw_status_page
= dev_priv
->hws_map
.handle
;
2339 memset(dev_priv
->hw_status_page
, 0, PAGE_SIZE
);
2340 I915_WRITE(HWS_PGA
, dev_priv
->status_gfx_addr
);
2341 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv
->status_gfx_addr
);
2347 i915_gem_init_ringbuffer(struct drm_device
*dev
)
2349 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2350 struct drm_gem_object
*obj
;
2351 struct drm_i915_gem_object
*obj_priv
;
2354 ret
= i915_gem_init_hws(dev
);
2358 obj
= drm_gem_object_alloc(dev
, 128 * 1024);
2360 DRM_ERROR("Failed to allocate ringbuffer\n");
2363 obj_priv
= obj
->driver_private
;
2365 ret
= i915_gem_object_pin(obj
, 4096);
2367 drm_gem_object_unreference(obj
);
2371 /* Set up the kernel mapping for the ring. */
2372 dev_priv
->ring
.Size
= obj
->size
;
2373 dev_priv
->ring
.tail_mask
= obj
->size
- 1;
2375 dev_priv
->ring
.map
.offset
= dev
->agp
->base
+ obj_priv
->gtt_offset
;
2376 dev_priv
->ring
.map
.size
= obj
->size
;
2377 dev_priv
->ring
.map
.type
= 0;
2378 dev_priv
->ring
.map
.flags
= 0;
2379 dev_priv
->ring
.map
.mtrr
= 0;
2381 drm_core_ioremap_wc(&dev_priv
->ring
.map
, dev
);
2382 if (dev_priv
->ring
.map
.handle
== NULL
) {
2383 DRM_ERROR("Failed to map ringbuffer.\n");
2384 memset(&dev_priv
->ring
, 0, sizeof(dev_priv
->ring
));
2385 drm_gem_object_unreference(obj
);
2388 dev_priv
->ring
.ring_obj
= obj
;
2389 dev_priv
->ring
.virtual_start
= dev_priv
->ring
.map
.handle
;
2391 /* Stop the ring if it's running. */
2392 I915_WRITE(PRB0_CTL
, 0);
2393 I915_WRITE(PRB0_HEAD
, 0);
2394 I915_WRITE(PRB0_TAIL
, 0);
2395 I915_WRITE(PRB0_START
, 0);
2397 /* Initialize the ring. */
2398 I915_WRITE(PRB0_START
, obj_priv
->gtt_offset
);
2399 I915_WRITE(PRB0_CTL
,
2400 ((obj
->size
- 4096) & RING_NR_PAGES
) |
2404 /* Update our cache of the ring state */
2405 i915_kernel_lost_context(dev
);
2411 i915_gem_cleanup_ringbuffer(struct drm_device
*dev
)
2413 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2415 if (dev_priv
->ring
.ring_obj
== NULL
)
2418 drm_core_ioremapfree(&dev_priv
->ring
.map
, dev
);
2420 i915_gem_object_unpin(dev_priv
->ring
.ring_obj
);
2421 drm_gem_object_unreference(dev_priv
->ring
.ring_obj
);
2422 dev_priv
->ring
.ring_obj
= NULL
;
2423 memset(&dev_priv
->ring
, 0, sizeof(dev_priv
->ring
));
2425 if (dev_priv
->hws_obj
!= NULL
) {
2426 i915_gem_object_unpin(dev_priv
->hws_obj
);
2427 drm_gem_object_unreference(dev_priv
->hws_obj
);
2428 dev_priv
->hws_obj
= NULL
;
2429 memset(&dev_priv
->hws_map
, 0, sizeof(dev_priv
->hws_map
));
2431 /* Write high address into HWS_PGA when disabling. */
2432 I915_WRITE(HWS_PGA
, 0x1ffff000);
2437 i915_gem_entervt_ioctl(struct drm_device
*dev
, void *data
,
2438 struct drm_file
*file_priv
)
2440 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2443 if (dev_priv
->mm
.wedged
) {
2444 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2445 dev_priv
->mm
.wedged
= 0;
2448 ret
= i915_gem_init_ringbuffer(dev
);
2452 mutex_lock(&dev
->struct_mutex
);
2453 BUG_ON(!list_empty(&dev_priv
->mm
.active_list
));
2454 BUG_ON(!list_empty(&dev_priv
->mm
.flushing_list
));
2455 BUG_ON(!list_empty(&dev_priv
->mm
.inactive_list
));
2456 BUG_ON(!list_empty(&dev_priv
->mm
.request_list
));
2457 dev_priv
->mm
.suspended
= 0;
2458 mutex_unlock(&dev
->struct_mutex
);
2460 drm_irq_install(dev
);
2466 i915_gem_leavevt_ioctl(struct drm_device
*dev
, void *data
,
2467 struct drm_file
*file_priv
)
2471 mutex_lock(&dev
->struct_mutex
);
2472 ret
= i915_gem_idle(dev
);
2474 i915_gem_cleanup_ringbuffer(dev
);
2475 mutex_unlock(&dev
->struct_mutex
);
2477 drm_irq_uninstall(dev
);
2483 i915_gem_lastclose(struct drm_device
*dev
)
2486 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2488 mutex_lock(&dev
->struct_mutex
);
2490 if (dev_priv
->ring
.ring_obj
!= NULL
) {
2491 ret
= i915_gem_idle(dev
);
2493 DRM_ERROR("failed to idle hardware: %d\n", ret
);
2495 i915_gem_cleanup_ringbuffer(dev
);
2498 mutex_unlock(&dev
->struct_mutex
);
2502 i915_gem_load(struct drm_device
*dev
)
2504 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2506 INIT_LIST_HEAD(&dev_priv
->mm
.active_list
);
2507 INIT_LIST_HEAD(&dev_priv
->mm
.flushing_list
);
2508 INIT_LIST_HEAD(&dev_priv
->mm
.inactive_list
);
2509 INIT_LIST_HEAD(&dev_priv
->mm
.request_list
);
2510 INIT_DELAYED_WORK(&dev_priv
->mm
.retire_work
,
2511 i915_gem_retire_work_handler
);
2512 INIT_WORK(&dev_priv
->mm
.vblank_work
,
2513 i915_gem_vblank_work_handler
);
2514 dev_priv
->mm
.next_gem_seqno
= 1;
2516 i915_gem_detect_bit_6_swizzle(dev
);