drm/i915: dynamic render p-state support for Sandy Bridge
drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
					     bool write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
				       unsigned alignment,
				       bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    int nr_to_scan,
				    gfp_t gfp_mask);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

int
i915_gem_check_is_wedged(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	/* Success, we reset the GPU! */
	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/* GPU is hung, bump the completion count to account for
	 * the token we just consumed so that we never hit zero and
	 * end up waiting upon a subsequent completion event that
	 * will never happen.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return -EIO;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		return -EAGAIN;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

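/* An object is inactive when it is bound into the GTT, is not being
 * used by the GPU and is not pinned; only such objects may be evicted.
 */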
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

void i915_gem_do_init(struct drm_device *dev,
		      unsigned long start,
		      unsigned long mappable_end,
		      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	args->handle = handle;
	return 0;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy the data
 * directly from the backing pages of the object into the user's address
 * space. On a fault it fails, so we can fall back to
 * i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_to_user_inatomic(user_data,
					      vaddr + page_offset,
					      page_length);
		kunmap_atomic(vaddr);

		mark_page_accessed(page);
		page_cache_release(page);
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			/* Don't leak the pinned user pages on error. */
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					page,
					shmem_page_offset,
					page_length);
		}

		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		mark_page_accessed(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page, KM_USER0);
		ret = __copy_from_user_inatomic(vaddr + page_offset,
						user_data,
						page_length);
		kunmap_atomic(vaddr, KM_USER0);

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(page,
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
	else if (obj->gtt_space &&
		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			goto out_unpin;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out;

		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (obj->size > dev_priv->mm.gtt_mappable_end) {
		drm_gem_object_unreference_unlocked(obj);
		return -E2BIG;
	}

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);

	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unlock;

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL, true);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -EAGAIN:
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->base.map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->base.size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->base.size / PAGE_SIZE,
						    0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n",
			  obj->base.name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->base.size / PAGE_SIZE,
						  0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	unmap_mapping_range(obj->base.dev->dev_mapping,
			    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
			    obj->base.size, 1);

	obj->fault_mappable = false;
}

static void
i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->base.map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}

static uint32_t
i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    obj->tiling_mode == I915_TILING_NONE)
		return obj->base.size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		size = 1024*1024;
	else
		size = 512*1024;

	while (size < obj->base.size)
		size <<= 1;

	return size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 ||
	    obj->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(obj);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *					 unfenced object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
static uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	int tile_height;

	/*
	 * Minimum alignment is 4k (GTT page size) for sane hw.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
	    obj->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Older chips need unfenced tiled buffers to be aligned to the left
	 * edge of an even tile row (where tile rows are counted as if the bo is
	 * placed in a fenced gtt region).
	 */
	if (IS_GEN2(dev) ||
	    (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_height = 32;
	else
		tile_height = 8;

	return tile_height * obj->stride * 2;
}

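/*
 * Illustrative userspace flow (not part of the kernel documentation here):
 * the MMAP_GTT ioctl below only hands back a fake offset, which userspace
 * then passes to mmap(2) on the DRM fd, e.g.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, arg.offset);
 */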
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
		ret = -E2BIG;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj->base.map_list.map) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

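/* Allocate and pin the shmem backing pages for the object; each page is
 * held until i915_gem_object_put_pages_gtt() releases it.
 */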
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
			      gfp_t gfpmask)
{
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER |
					   __GFP_COLD |
					   __GFP_RECLAIMABLE |
					   gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj->pages[i] = page;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	if (obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj->dirty)
			set_page_dirty(obj->pages[i]);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj->pages[i]);

		page_cache_release(obj->pages[i]);
	}
	obj->dirty = 0;

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

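/* Activity tracking: an object on a ring's active list holds an extra
 * reference which is dropped again when it retires to the inactive list.
 */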
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring,
			       u32 seqno)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring == NULL);
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_rendering_seqno = seqno;
	if (obj->fenced_gpu_access) {
		struct drm_i915_fence_reg *reg;

		BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);

		obj->last_fenced_seqno = seqno;
		obj->last_fenced_ring = ring;

		reg = &dev_priv->fence_regs[obj->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
	}
}

static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
	list_del_init(&obj->ring_list);
	obj->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	BUG_ON(!obj->active);
	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);

	i915_gem_object_move_off_active(obj);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (obj->pin_count != 0)
		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
	else
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	BUG_ON(!list_empty(&obj->gpu_write_list));
	BUG_ON(!obj->active);
	obj->ring = NULL;

	i915_gem_object_move_off_active(obj);
	obj->fenced_gpu_access = false;

	obj->active = 0;
	obj->pending_gpu_write = false;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.  Here we mirror the actions taken
	 * by shmem_delete_inode() to release the backing store.
	 */
	inode = obj->base.filp->f_path.dentry->d_inode;
	truncate_inode_pages(inode->i_mapping, 0);
	if (inode->i_op->truncate_range)
		inode->i_op->truncate_range(inode, 0, (loff_t)-1);

	obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

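/* Retire the pending GPU writes covered by a flush: each affected object
 * moves to the active list under the seqno of the flushing request.
 */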
static void
i915_gem_process_flushing_list(struct drm_device *dev,
			       uint32_t flush_domains,
			       struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &ring->gpu_write_list,
				 gpu_write_list) {
		if (obj->base.write_domain & flush_domains) {
			uint32_t old_write_domain = obj->base.write_domain;

			obj->base.write_domain = 0;
			list_del_init(&obj->gpu_write_list);
			i915_gem_object_move_to_active(obj, ring,
						       i915_gem_next_request_seqno(dev, ring));

			trace_i915_gem_object_change_domain(obj,
							    obj->base.read_domains,
							    old_write_domain);
		}
	}
}

int
i915_add_request(struct drm_device *dev,
		 struct drm_file *file,
		 struct drm_i915_gem_request *request,
		 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = NULL;
	uint32_t seqno;
	int was_empty;
	int ret;

	BUG_ON(request == NULL);

	if (file != NULL)
		file_priv = file->driver_priv;

	ret = ring->add_request(ring, &seqno);
	if (ret)
		return ret;

	ring->outstanding_lazy_request = false;

	request->seqno = seqno;
	request->ring = ring;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (file_priv) {
		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
		if (was_empty)
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
	}
	return 0;
}

1739static inline void
1740i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
673a394b 1741{
1c25595f 1742 struct drm_i915_file_private *file_priv = request->file_priv;
673a394b 1743
1c25595f
CW
1744 if (!file_priv)
1745 return;
1c5d22f7 1746
1c25595f
CW
1747 spin_lock(&file_priv->mm.lock);
1748 list_del(&request->client_list);
1749 request->file_priv = NULL;
1750 spin_unlock(&file_priv->mm.lock);
673a394b 1751}
673a394b 1752
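/* After a GPU reset none of the outstanding requests can ever complete,
 * so drop every request queued on this ring and move its active objects
 * back to the inactive list with their stale GPU write domains cleared.
 */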
dfaae392
CW
1753static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1754 struct intel_ring_buffer *ring)
9375e446 1755{
dfaae392
CW
1756 while (!list_empty(&ring->request_list)) {
1757 struct drm_i915_gem_request *request;
673a394b 1758
dfaae392
CW
1759 request = list_first_entry(&ring->request_list,
1760 struct drm_i915_gem_request,
1761 list);
de151cf6 1762
dfaae392 1763 list_del(&request->list);
f787a5f5 1764 i915_gem_request_remove_from_client(request);
dfaae392
CW
1765 kfree(request);
1766 }
673a394b 1767
dfaae392 1768 while (!list_empty(&ring->active_list)) {
05394f39 1769 struct drm_i915_gem_object *obj;
9375e446 1770
05394f39
CW
1771 obj = list_first_entry(&ring->active_list,
1772 struct drm_i915_gem_object,
1773 ring_list);
9375e446 1774
05394f39
CW
1775 obj->base.write_domain = 0;
1776 list_del_init(&obj->gpu_write_list);
1777 i915_gem_object_move_to_inactive(obj);
673a394b
EA
1778 }
1779}
1780
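/* A reset also clobbers the fence registers, so forget every live fence:
 * drop any GTT mmaps that depended on detiling and release each register.
 */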
312817a3
CW
1781static void i915_gem_reset_fences(struct drm_device *dev)
1782{
1783 struct drm_i915_private *dev_priv = dev->dev_private;
1784 int i;
1785
1786 for (i = 0; i < 16; i++) {
1787 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
7d2cb39c
CW
1788 struct drm_i915_gem_object *obj = reg->obj;
1789
1790 if (!obj)
1791 continue;
1792
1793 if (obj->tiling_mode)
1794 i915_gem_release_mmap(obj);
1795
d9e86c0e
CW
1796 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1797 reg->obj->fenced_gpu_access = false;
1798 reg->obj->last_fenced_seqno = 0;
1799 reg->obj->last_fenced_ring = NULL;
1800 i915_gem_clear_fence_reg(dev, reg);
312817a3
CW
1801 }
1802}
1803
069efc1d 1804void i915_gem_reset(struct drm_device *dev)
673a394b 1805{
77f01230 1806 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 1807 struct drm_i915_gem_object *obj;
1ec14ad3 1808 int i;
673a394b 1809
1ec14ad3
CW
1810 for (i = 0; i < I915_NUM_RINGS; i++)
1811 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
dfaae392
CW
1812
1813 /* Remove anything from the flushing lists. The GPU cache is likely
1814 * to be lost on reset along with the data, so simply move the
1815 * lost bo to the inactive list.
1816 */
1817 while (!list_empty(&dev_priv->mm.flushing_list)) {
05394f39
CW
 1818 obj = list_first_entry(&dev_priv->mm.flushing_list,
1819 struct drm_i915_gem_object,
1820 mm_list);
dfaae392 1821
05394f39
CW
1822 obj->base.write_domain = 0;
1823 list_del_init(&obj->gpu_write_list);
1824 i915_gem_object_move_to_inactive(obj);
dfaae392
CW
1825 }
1826
1827 /* Move everything out of the GPU domains to ensure we do any
1828 * necessary invalidation upon reuse.
1829 */
05394f39 1830 list_for_each_entry(obj,
77f01230 1831 &dev_priv->mm.inactive_list,
69dc4987 1832 mm_list)
77f01230 1833 {
05394f39 1834 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
77f01230 1835 }
069efc1d
CW
1836
1837 /* The fence registers are invalidated so clear them out */
312817a3 1838 i915_gem_reset_fences(dev);
673a394b
EA
1839}
1840
1841/**
 1842 * Retires completed requests on @ring as sequence numbers are passed, moving idle objects off the active list.
1843 */
b09a1fec
CW
1844static void
1845i915_gem_retire_requests_ring(struct drm_device *dev,
1846 struct intel_ring_buffer *ring)
673a394b
EA
1847{
1848 drm_i915_private_t *dev_priv = dev->dev_private;
1849 uint32_t seqno;
1ec14ad3 1850 int i;
673a394b 1851
b84d5f0c
CW
1852 if (!ring->status_page.page_addr ||
1853 list_empty(&ring->request_list))
6c0594a3
KW
1854 return;
1855
23bc5982 1856 WARN_ON(i915_verify_lists(dev));
673a394b 1857
78501eac 1858 seqno = ring->get_seqno(ring);
1ec14ad3
CW
1859
1860 for (i = 0; i < I915_NUM_RINGS; i++)
1861 if (seqno >= ring->sync_seqno[i])
1862 ring->sync_seqno[i] = 0;
1863
852835f3 1864 while (!list_empty(&ring->request_list)) {
673a394b 1865 struct drm_i915_gem_request *request;
673a394b 1866
852835f3 1867 request = list_first_entry(&ring->request_list,
673a394b
EA
1868 struct drm_i915_gem_request,
1869 list);
673a394b 1870
dfaae392 1871 if (!i915_seqno_passed(seqno, request->seqno))
b84d5f0c
CW
1872 break;
1873
1874 trace_i915_gem_request_retire(dev, request->seqno);
1875
1876 list_del(&request->list);
f787a5f5 1877 i915_gem_request_remove_from_client(request);
b84d5f0c
CW
1878 kfree(request);
1879 }
673a394b 1880
b84d5f0c
CW
1881 /* Move any buffers on the active list that are no longer referenced
1882 * by the ringbuffer to the flushing/inactive lists as appropriate.
1883 */
1884 while (!list_empty(&ring->active_list)) {
05394f39 1885 struct drm_i915_gem_object *obj;
b84d5f0c 1886
05394f39
CW
 1887 obj = list_first_entry(&ring->active_list,
1888 struct drm_i915_gem_object,
1889 ring_list);
673a394b 1890
05394f39 1891 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
673a394b 1892 break;
b84d5f0c 1893
05394f39 1894 if (obj->base.write_domain != 0)
b84d5f0c
CW
1895 i915_gem_object_move_to_flushing(obj);
1896 else
1897 i915_gem_object_move_to_inactive(obj);
673a394b 1898 }
9d34e5db
CW
1899
 1900 if (unlikely(dev_priv->trace_irq_seqno &&
1901 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1ec14ad3 1902 ring->irq_put(ring);
9d34e5db
CW
1903 dev_priv->trace_irq_seqno = 0;
1904 }
23bc5982
CW
1905
1906 WARN_ON(i915_verify_lists(dev));
673a394b
EA
1907}
1908
b09a1fec
CW
1909void
1910i915_gem_retire_requests(struct drm_device *dev)
1911{
1912 drm_i915_private_t *dev_priv = dev->dev_private;
1ec14ad3 1913 int i;
b09a1fec 1914
be72615b 1915 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
05394f39 1916 struct drm_i915_gem_object *obj, *next;
be72615b
CW
1917
1918 /* We must be careful that during unbind() we do not
1919 * accidentally infinitely recurse into retire requests.
1920 * Currently:
1921 * retire -> free -> unbind -> wait -> retire_ring
1922 */
05394f39 1923 list_for_each_entry_safe(obj, next,
be72615b 1924 &dev_priv->mm.deferred_free_list,
69dc4987 1925 mm_list)
05394f39 1926 i915_gem_free_object_tail(obj);
be72615b
CW
1927 }
1928
1ec14ad3
CW
1929 for (i = 0; i < I915_NUM_RINGS; i++)
1930 i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
b09a1fec
CW
1931}
1932
75ef9da2 1933static void
673a394b
EA
1934i915_gem_retire_work_handler(struct work_struct *work)
1935{
1936 drm_i915_private_t *dev_priv;
1937 struct drm_device *dev;
1938
1939 dev_priv = container_of(work, drm_i915_private_t,
1940 mm.retire_work.work);
1941 dev = dev_priv->dev;
1942
891b48cf
CW
1943 /* Come back later if the device is busy... */
1944 if (!mutex_trylock(&dev->struct_mutex)) {
1945 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1946 return;
1947 }
1948
b09a1fec 1949 i915_gem_retire_requests(dev);
d1b851fc 1950
6dbe2772 1951 if (!dev_priv->mm.suspended &&
1ec14ad3
CW
1952 (!list_empty(&dev_priv->ring[RCS].request_list) ||
1953 !list_empty(&dev_priv->ring[VCS].request_list) ||
1954 !list_empty(&dev_priv->ring[BCS].request_list)))
9c9fe1f8 1955 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
673a394b
EA
1956 mutex_unlock(&dev->struct_mutex);
1957}
1958
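/* Wait for @seqno to be signalled on @ring. If the seqno belongs to the
 * ring's still-unemitted lazy request, emit that request first; then sleep
 * on the ring's irq queue (or poll when no irq can be acquired), returning
 * -EAGAIN if the GPU is wedged, and retire completed requests on success.
 */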
5a5a0c64 1959int
852835f3 1960i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
8a1a49f9 1961 bool interruptible, struct intel_ring_buffer *ring)
673a394b
EA
1962{
1963 drm_i915_private_t *dev_priv = dev->dev_private;
802c7eb6 1964 u32 ier;
673a394b
EA
1965 int ret = 0;
1966
1967 BUG_ON(seqno == 0);
1968
ba1234d1 1969 if (atomic_read(&dev_priv->mm.wedged))
30dbf0c0
CW
1970 return -EAGAIN;
1971
5d97eb69 1972 if (seqno == ring->outstanding_lazy_request) {
3cce469c
CW
1973 struct drm_i915_gem_request *request;
1974
1975 request = kzalloc(sizeof(*request), GFP_KERNEL);
1976 if (request == NULL)
e35a41de 1977 return -ENOMEM;
3cce469c
CW
1978
1979 ret = i915_add_request(dev, NULL, request, ring);
1980 if (ret) {
1981 kfree(request);
1982 return ret;
1983 }
1984
1985 seqno = request->seqno;
e35a41de 1986 }
ffed1d09 1987
78501eac 1988 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
bad720ff 1989 if (HAS_PCH_SPLIT(dev))
036a4a7d
ZW
1990 ier = I915_READ(DEIER) | I915_READ(GTIER);
1991 else
1992 ier = I915_READ(IER);
802c7eb6
JB
1993 if (!ier) {
1994 DRM_ERROR("something (likely vbetool) disabled "
1995 "interrupts, re-enabling\n");
1996 i915_driver_irq_preinstall(dev);
1997 i915_driver_irq_postinstall(dev);
1998 }
1999
1c5d22f7
CW
2000 trace_i915_gem_request_wait_begin(dev, seqno);
2001
b2223497 2002 ring->waiting_seqno = seqno;
b13c2b96
CW
2003 if (ring->irq_get(ring)) {
2004 if (interruptible)
2005 ret = wait_event_interruptible(ring->irq_queue,
2006 i915_seqno_passed(ring->get_seqno(ring), seqno)
2007 || atomic_read(&dev_priv->mm.wedged));
2008 else
2009 wait_event(ring->irq_queue,
2010 i915_seqno_passed(ring->get_seqno(ring), seqno)
2011 || atomic_read(&dev_priv->mm.wedged));
2012
2013 ring->irq_put(ring);
b5ba177d
CW
2014 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2015 seqno) ||
2016 atomic_read(&dev_priv->mm.wedged), 3000))
2017 ret = -EBUSY;
b2223497 2018 ring->waiting_seqno = 0;
1c5d22f7
CW
2019
2020 trace_i915_gem_request_wait_end(dev, seqno);
673a394b 2021 }
ba1234d1 2022 if (atomic_read(&dev_priv->mm.wedged))
30dbf0c0 2023 ret = -EAGAIN;
673a394b
EA
2024
2025 if (ret && ret != -ERESTARTSYS)
8bff917c 2026 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
78501eac 2027 __func__, ret, seqno, ring->get_seqno(ring),
8bff917c 2028 dev_priv->next_seqno);
673a394b
EA
2029
2030 /* Directly dispatch request retiring. While we have the work queue
2031 * to handle this, the waiter on a request often wants an associated
2032 * buffer to have made it to the inactive list, and we would need
2033 * a separate wait queue to handle that.
2034 */
2035 if (ret == 0)
b09a1fec 2036 i915_gem_retire_requests_ring(dev, ring);
673a394b
EA
2037
2038 return ret;
2039}
2040
48764bf4
DV
2041/**
2042 * Waits for a sequence number to be signaled, and cleans up the
2043 * request and object lists appropriately for that event.
2044 */
2045static int
852835f3 2046i915_wait_request(struct drm_device *dev, uint32_t seqno,
a56ba56c 2047 struct intel_ring_buffer *ring)
48764bf4 2048{
852835f3 2049 return i915_do_wait_request(dev, seqno, 1, ring);
48764bf4
DV
2050}
2051
673a394b
EA
2052/**
2053 * Ensures that all rendering to the object has completed and the object is
2054 * safe to unbind from the GTT or access from the CPU.
2055 */
54cf91dc 2056int
05394f39 2057i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
2cf34d7b 2058 bool interruptible)
673a394b 2059{
05394f39 2060 struct drm_device *dev = obj->base.dev;
673a394b
EA
2061 int ret;
2062
e47c68e9
EA
2063 /* This function only exists to support waiting for existing rendering,
2064 * not for emitting required flushes.
673a394b 2065 */
05394f39 2066 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
673a394b
EA
2067
2068 /* If there is rendering queued on the buffer being evicted, wait for
2069 * it.
2070 */
05394f39 2071 if (obj->active) {
2cf34d7b 2072 ret = i915_do_wait_request(dev,
05394f39 2073 obj->last_rendering_seqno,
2cf34d7b 2074 interruptible,
05394f39 2075 obj->ring);
2cf34d7b 2076 if (ret)
673a394b
EA
2077 return ret;
2078 }
2079
2080 return 0;
2081}
2082
2083/**
2084 * Unbinds an object from the GTT aperture.
2085 */
0f973f27 2086int
05394f39 2087i915_gem_object_unbind(struct drm_i915_gem_object *obj)
673a394b 2088{
673a394b
EA
2089 int ret = 0;
2090
05394f39 2091 if (obj->gtt_space == NULL)
673a394b
EA
2092 return 0;
2093
05394f39 2094 if (obj->pin_count != 0) {
673a394b
EA
2095 DRM_ERROR("Attempting to unbind pinned buffer\n");
2096 return -EINVAL;
2097 }
2098
5323fd04
EA
2099 /* blow away mappings if mapped through GTT */
2100 i915_gem_release_mmap(obj);
2101
673a394b
EA
2102 /* Move the object to the CPU domain to ensure that
2103 * any possible CPU writes while it's not in the GTT
2104 * are flushed when we go to remap it. This will
2105 * also ensure that all pending GPU writes are finished
2106 * before we unbind.
2107 */
e47c68e9 2108 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
8dc1775d 2109 if (ret == -ERESTARTSYS)
673a394b 2110 return ret;
8dc1775d
CW
2111 /* Continue on if we fail due to EIO, the GPU is hung so we
2112 * should be safe and we need to cleanup or else we might
2113 * cause memory corruption through use-after-free.
2114 */
812ed492
CW
2115 if (ret) {
2116 i915_gem_clflush_object(obj);
05394f39 2117 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
812ed492 2118 }
673a394b 2119
96b47b65 2120 /* release the fence reg _after_ flushing */
d9e86c0e
CW
2121 ret = i915_gem_object_put_fence(obj);
2122 if (ret == -ERESTARTSYS)
2123 return ret;
96b47b65 2124
7c2e6fdf 2125 i915_gem_gtt_unbind_object(obj);
e5281ccd 2126 i915_gem_object_put_pages_gtt(obj);
673a394b 2127
6299f992 2128 list_del_init(&obj->gtt_list);
05394f39 2129 list_del_init(&obj->mm_list);
75e9e915 2130 /* Avoid an unnecessary call to unbind on rebind. */
05394f39 2131 obj->map_and_fenceable = true;
673a394b 2132
05394f39
CW
2133 drm_mm_put_block(obj->gtt_space);
2134 obj->gtt_space = NULL;
2135 obj->gtt_offset = 0;
673a394b 2136
05394f39 2137 if (i915_gem_object_is_purgeable(obj))
963b4836
CW
2138 i915_gem_object_truncate(obj);
2139
1c5d22f7
CW
2140 trace_i915_gem_object_unbind(obj);
2141
8dc1775d 2142 return ret;
673a394b
EA
2143}
2144
54cf91dc
CW
2145void
2146i915_gem_flush_ring(struct drm_device *dev,
2147 struct intel_ring_buffer *ring,
2148 uint32_t invalidate_domains,
2149 uint32_t flush_domains)
2150{
2151 ring->flush(ring, invalidate_domains, flush_domains);
2152 i915_gem_process_flushing_list(dev, flush_domains, ring);
2153}
2154
a56ba56c
CW
2155static int i915_ring_idle(struct drm_device *dev,
2156 struct intel_ring_buffer *ring)
2157{
395b70be 2158 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
64193406
CW
2159 return 0;
2160
0ac74c6b
CW
2161 if (!list_empty(&ring->gpu_write_list))
2162 i915_gem_flush_ring(dev, ring,
2163 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
a56ba56c
CW
2164 return i915_wait_request(dev,
2165 i915_gem_next_request_seqno(dev, ring),
2166 ring);
2167}
2168
b47eb4a2 2169int
4df2faf4
DV
2170i915_gpu_idle(struct drm_device *dev)
2171{
2172 drm_i915_private_t *dev_priv = dev->dev_private;
2173 bool lists_empty;
1ec14ad3 2174 int ret, i;
4df2faf4 2175
d1b851fc 2176 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
395b70be 2177 list_empty(&dev_priv->mm.active_list));
4df2faf4
DV
2178 if (lists_empty)
2179 return 0;
2180
2181 /* Flush everything onto the inactive list. */
1ec14ad3
CW
2182 for (i = 0; i < I915_NUM_RINGS; i++) {
2183 ret = i915_ring_idle(dev, &dev_priv->ring[i]);
2184 if (ret)
2185 return ret;
2186 }
4df2faf4 2187
8a1a49f9 2188 return 0;
4df2faf4
DV
2189}
2190
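/* The per-generation *_write_fence_reg() helpers below pack the object's
 * GTT offset, size, pitch and tiling mode into that generation's fence
 * register layout, and either write the register directly or, when
 * @pipelined is given, emit MI_LOAD_REGISTER_IMM on that ring so the
 * update is ordered against the pipelined rendering.
 */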
c6642782
DV
2191static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2192 struct intel_ring_buffer *pipelined)
4e901fdc 2193{
05394f39 2194 struct drm_device *dev = obj->base.dev;
4e901fdc 2195 drm_i915_private_t *dev_priv = dev->dev_private;
05394f39
CW
2196 u32 size = obj->gtt_space->size;
2197 int regnum = obj->fence_reg;
4e901fdc
EA
2198 uint64_t val;
2199
05394f39 2200 val = (uint64_t)((obj->gtt_offset + size - 4096) &
c6642782 2201 0xfffff000) << 32;
05394f39
CW
2202 val |= obj->gtt_offset & 0xfffff000;
2203 val |= (uint64_t)((obj->stride / 128) - 1) <<
4e901fdc
EA
2204 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2205
05394f39 2206 if (obj->tiling_mode == I915_TILING_Y)
4e901fdc
EA
2207 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2208 val |= I965_FENCE_REG_VALID;
2209
c6642782
DV
2210 if (pipelined) {
2211 int ret = intel_ring_begin(pipelined, 6);
2212 if (ret)
2213 return ret;
2214
2215 intel_ring_emit(pipelined, MI_NOOP);
2216 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2217 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2218 intel_ring_emit(pipelined, (u32)val);
2219 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2220 intel_ring_emit(pipelined, (u32)(val >> 32));
2221 intel_ring_advance(pipelined);
2222 } else
2223 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2224
2225 return 0;
4e901fdc
EA
2226}
2227
c6642782
DV
2228static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2229 struct intel_ring_buffer *pipelined)
de151cf6 2230{
05394f39 2231 struct drm_device *dev = obj->base.dev;
de151cf6 2232 drm_i915_private_t *dev_priv = dev->dev_private;
05394f39
CW
2233 u32 size = obj->gtt_space->size;
2234 int regnum = obj->fence_reg;
de151cf6
JB
2235 uint64_t val;
2236
05394f39 2237 val = (uint64_t)((obj->gtt_offset + size - 4096) &
de151cf6 2238 0xfffff000) << 32;
05394f39
CW
2239 val |= obj->gtt_offset & 0xfffff000;
2240 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2241 if (obj->tiling_mode == I915_TILING_Y)
de151cf6
JB
2242 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2243 val |= I965_FENCE_REG_VALID;
2244
c6642782
DV
2245 if (pipelined) {
2246 int ret = intel_ring_begin(pipelined, 6);
2247 if (ret)
2248 return ret;
2249
2250 intel_ring_emit(pipelined, MI_NOOP);
2251 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2252 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2253 intel_ring_emit(pipelined, (u32)val);
2254 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2255 intel_ring_emit(pipelined, (u32)(val >> 32));
2256 intel_ring_advance(pipelined);
2257 } else
2258 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2259
2260 return 0;
de151cf6
JB
2261}
2262
c6642782
DV
2263static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2264 struct intel_ring_buffer *pipelined)
de151cf6 2265{
05394f39 2266 struct drm_device *dev = obj->base.dev;
de151cf6 2267 drm_i915_private_t *dev_priv = dev->dev_private;
05394f39 2268 u32 size = obj->gtt_space->size;
c6642782 2269 u32 fence_reg, val, pitch_val;
0f973f27 2270 int tile_width;
de151cf6 2271
c6642782
DV
2272 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2273 (size & -size) != size ||
2274 (obj->gtt_offset & (size - 1)),
2275 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2276 obj->gtt_offset, obj->map_and_fenceable, size))
2277 return -EINVAL;
de151cf6 2278
c6642782 2279 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
0f973f27 2280 tile_width = 128;
de151cf6 2281 else
0f973f27
JB
2282 tile_width = 512;
2283
2284 /* Note: pitch better be a power of two tile widths */
05394f39 2285 pitch_val = obj->stride / tile_width;
0f973f27 2286 pitch_val = ffs(pitch_val) - 1;
de151cf6 2287
05394f39
CW
2288 val = obj->gtt_offset;
2289 if (obj->tiling_mode == I915_TILING_Y)
de151cf6 2290 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
a00b10c3 2291 val |= I915_FENCE_SIZE_BITS(size);
de151cf6
JB
2292 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2293 val |= I830_FENCE_REG_VALID;
2294
05394f39 2295 fence_reg = obj->fence_reg;
a00b10c3
CW
2296 if (fence_reg < 8)
2297 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
dc529a4f 2298 else
a00b10c3 2299 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
c6642782
DV
2300
2301 if (pipelined) {
2302 int ret = intel_ring_begin(pipelined, 4);
2303 if (ret)
2304 return ret;
2305
2306 intel_ring_emit(pipelined, MI_NOOP);
2307 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2308 intel_ring_emit(pipelined, fence_reg);
2309 intel_ring_emit(pipelined, val);
2310 intel_ring_advance(pipelined);
2311 } else
2312 I915_WRITE(fence_reg, val);
2313
2314 return 0;
de151cf6
JB
2315}
2316
c6642782
DV
2317static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2318 struct intel_ring_buffer *pipelined)
de151cf6 2319{
05394f39 2320 struct drm_device *dev = obj->base.dev;
de151cf6 2321 drm_i915_private_t *dev_priv = dev->dev_private;
05394f39
CW
2322 u32 size = obj->gtt_space->size;
2323 int regnum = obj->fence_reg;
de151cf6
JB
2324 uint32_t val;
2325 uint32_t pitch_val;
2326
c6642782
DV
2327 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2328 (size & -size) != size ||
2329 (obj->gtt_offset & (size - 1)),
2330 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2331 obj->gtt_offset, size))
2332 return -EINVAL;
de151cf6 2333
05394f39 2334 pitch_val = obj->stride / 128;
e76a16de 2335 pitch_val = ffs(pitch_val) - 1;
e76a16de 2336
05394f39
CW
2337 val = obj->gtt_offset;
2338 if (obj->tiling_mode == I915_TILING_Y)
de151cf6 2339 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
c6642782 2340 val |= I830_FENCE_SIZE_BITS(size);
de151cf6
JB
2341 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2342 val |= I830_FENCE_REG_VALID;
2343
c6642782
DV
2344 if (pipelined) {
2345 int ret = intel_ring_begin(pipelined, 4);
2346 if (ret)
2347 return ret;
2348
2349 intel_ring_emit(pipelined, MI_NOOP);
2350 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2351 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2352 intel_ring_emit(pipelined, val);
2353 intel_ring_advance(pipelined);
2354 } else
2355 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2356
2357 return 0;
de151cf6
JB
2358}
2359
d9e86c0e
CW
2360static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2361{
2362 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2363}
2364
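/* Make it safe to reassign the fence backing @obj: flush any pending
 * fenced GPU write, and wait for the last fenced access to complete
 * unless it was issued on @pipelined itself.
 */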
2365static int
2366i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2367 struct intel_ring_buffer *pipelined,
2368 bool interruptible)
2369{
2370 int ret;
2371
2372 if (obj->fenced_gpu_access) {
2373 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
2374 i915_gem_flush_ring(obj->base.dev,
2375 obj->last_fenced_ring,
2376 0, obj->base.write_domain);
2377
2378 obj->fenced_gpu_access = false;
2379 }
2380
2381 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2382 if (!ring_passed_seqno(obj->last_fenced_ring,
2383 obj->last_fenced_seqno)) {
2384 ret = i915_do_wait_request(obj->base.dev,
2385 obj->last_fenced_seqno,
2386 interruptible,
2387 obj->last_fenced_ring);
2388 if (ret)
2389 return ret;
2390 }
2391
2392 obj->last_fenced_seqno = 0;
2393 obj->last_fenced_ring = NULL;
2394 }
2395
2396 return 0;
2397}
2398
2399int
2400i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2401{
2402 int ret;
2403
2404 if (obj->tiling_mode)
2405 i915_gem_release_mmap(obj);
2406
2407 ret = i915_gem_object_flush_fence(obj, NULL, true);
2408 if (ret)
2409 return ret;
2410
2411 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2412 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2413 i915_gem_clear_fence_reg(obj->base.dev,
2414 &dev_priv->fence_regs[obj->fence_reg]);
2415
2416 obj->fence_reg = I915_FENCE_REG_NONE;
2417 }
2418
2419 return 0;
2420}
2421
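/* Choose a fence register to (re)use: prefer a completely free register,
 * otherwise steal the least-recently-used one whose object is not pinned,
 * preferring a register whose last fenced access was on @pipelined (or on
 * no ring at all) so that the change can be queued without waiting.
 */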
2422static struct drm_i915_fence_reg *
2423i915_find_fence_reg(struct drm_device *dev,
2424 struct intel_ring_buffer *pipelined)
ae3db24a 2425{
ae3db24a 2426 struct drm_i915_private *dev_priv = dev->dev_private;
d9e86c0e
CW
2427 struct drm_i915_fence_reg *reg, *first, *avail;
2428 int i;
ae3db24a
DV
2429
2430 /* First try to find a free reg */
d9e86c0e 2431 avail = NULL;
ae3db24a
DV
2432 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2433 reg = &dev_priv->fence_regs[i];
2434 if (!reg->obj)
d9e86c0e 2435 return reg;
ae3db24a 2436
05394f39 2437 if (!reg->obj->pin_count)
d9e86c0e 2438 avail = reg;
ae3db24a
DV
2439 }
2440
d9e86c0e
CW
2441 if (avail == NULL)
2442 return NULL;
ae3db24a
DV
2443
2444 /* None available, try to steal one or wait for a user to finish */
d9e86c0e
CW
2445 avail = first = NULL;
2446 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2447 if (reg->obj->pin_count)
ae3db24a
DV
2448 continue;
2449
d9e86c0e
CW
2450 if (first == NULL)
2451 first = reg;
2452
2453 if (!pipelined ||
2454 !reg->obj->last_fenced_ring ||
2455 reg->obj->last_fenced_ring == pipelined) {
2456 avail = reg;
2457 break;
2458 }
ae3db24a
DV
2459 }
2460
d9e86c0e
CW
2461 if (avail == NULL)
2462 avail = first;
ae3db24a 2463
a00b10c3 2464 return avail;
ae3db24a
DV
2465}
2466
de151cf6 2467/**
d9e86c0e 2468 * i915_gem_object_get_fence - set up a fence reg for an object
de151cf6 2469 * @obj: object to map through a fence reg
d9e86c0e
CW
2470 * @pipelined: ring on which to queue the change, or NULL for CPU access
 2471 * @interruptible: whether the wait for any outstanding rendering may be interrupted by signals
de151cf6
JB
2472 *
2473 * When mapping objects through the GTT, userspace wants to be able to write
2474 * to them without having to worry about swizzling if the object is tiled.
2475 *
2476 * This function walks the fence regs looking for a free one for @obj,
2477 * stealing one if it can't find any.
2478 *
2479 * It then sets up the reg based on the object's properties: address, pitch
2480 * and tiling format.
2481 */
8c4b8c3f 2482int
d9e86c0e
CW
2483i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2484 struct intel_ring_buffer *pipelined,
2485 bool interruptible)
de151cf6 2486{
05394f39 2487 struct drm_device *dev = obj->base.dev;
79e53945 2488 struct drm_i915_private *dev_priv = dev->dev_private;
d9e86c0e 2489 struct drm_i915_fence_reg *reg;
ae3db24a 2490 int ret;
de151cf6 2491
6bda10d1
CW
2492 /* XXX disable pipelining. There are bugs. Shocking. */
2493 pipelined = NULL;
2494
d9e86c0e 2495 /* Just update our place in the LRU if our fence is getting reused. */
05394f39
CW
2496 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2497 reg = &dev_priv->fence_regs[obj->fence_reg];
007cc8ac 2498 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
d9e86c0e
CW
2499
2500 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2501 pipelined = NULL;
2502
2503 if (!pipelined) {
2504 if (reg->setup_seqno) {
2505 if (!ring_passed_seqno(obj->last_fenced_ring,
2506 reg->setup_seqno)) {
2507 ret = i915_do_wait_request(obj->base.dev,
2508 reg->setup_seqno,
2509 interruptible,
2510 obj->last_fenced_ring);
2511 if (ret)
2512 return ret;
2513 }
2514
2515 reg->setup_seqno = 0;
2516 }
2517 } else if (obj->last_fenced_ring &&
2518 obj->last_fenced_ring != pipelined) {
2519 ret = i915_gem_object_flush_fence(obj,
2520 pipelined,
2521 interruptible);
2522 if (ret)
2523 return ret;
2524 } else if (obj->tiling_changed) {
2525 if (obj->fenced_gpu_access) {
2526 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
2527 i915_gem_flush_ring(obj->base.dev, obj->ring,
2528 0, obj->base.write_domain);
2529
2530 obj->fenced_gpu_access = false;
2531 }
2532 }
2533
2534 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2535 pipelined = NULL;
2536 BUG_ON(!pipelined && reg->setup_seqno);
2537
2538 if (obj->tiling_changed) {
2539 if (pipelined) {
2540 reg->setup_seqno =
2541 i915_gem_next_request_seqno(dev, pipelined);
2542 obj->last_fenced_seqno = reg->setup_seqno;
2543 obj->last_fenced_ring = pipelined;
2544 }
2545 goto update;
2546 }
2547
a09ba7fa
EA
2548 return 0;
2549 }
2550
d9e86c0e
CW
2551 reg = i915_find_fence_reg(dev, pipelined);
2552 if (reg == NULL)
2553 return -ENOSPC;
de151cf6 2554
d9e86c0e
CW
2555 ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
2556 if (ret)
ae3db24a 2557 return ret;
de151cf6 2558
d9e86c0e
CW
2559 if (reg->obj) {
2560 struct drm_i915_gem_object *old = reg->obj;
2561
2562 drm_gem_object_reference(&old->base);
2563
2564 if (old->tiling_mode)
2565 i915_gem_release_mmap(old);
2566
d9e86c0e 2567 ret = i915_gem_object_flush_fence(old,
6bda10d1 2568 pipelined,
d9e86c0e
CW
2569 interruptible);
2570 if (ret) {
2571 drm_gem_object_unreference(&old->base);
2572 return ret;
2573 }
2574
2575 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2576 pipelined = NULL;
2577
2578 old->fence_reg = I915_FENCE_REG_NONE;
2579 old->last_fenced_ring = pipelined;
2580 old->last_fenced_seqno =
2581 pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
2582
2583 drm_gem_object_unreference(&old->base);
2584 } else if (obj->last_fenced_seqno == 0)
2585 pipelined = NULL;
a09ba7fa 2586
de151cf6 2587 reg->obj = obj;
d9e86c0e
CW
2588 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2589 obj->fence_reg = reg - dev_priv->fence_regs;
2590 obj->last_fenced_ring = pipelined;
de151cf6 2591
d9e86c0e
CW
2592 reg->setup_seqno =
2593 pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
2594 obj->last_fenced_seqno = reg->setup_seqno;
2595
2596update:
2597 obj->tiling_changed = false;
e259befd
CW
2598 switch (INTEL_INFO(dev)->gen) {
2599 case 6:
c6642782 2600 ret = sandybridge_write_fence_reg(obj, pipelined);
e259befd
CW
2601 break;
2602 case 5:
2603 case 4:
c6642782 2604 ret = i965_write_fence_reg(obj, pipelined);
e259befd
CW
2605 break;
2606 case 3:
c6642782 2607 ret = i915_write_fence_reg(obj, pipelined);
e259befd
CW
2608 break;
2609 case 2:
c6642782 2610 ret = i830_write_fence_reg(obj, pipelined);
e259befd
CW
2611 break;
2612 }
d9ddcb96 2613
c6642782 2614 return ret;
de151cf6
JB
2615}
2616
2617/**
2618 * i915_gem_clear_fence_reg - clear out fence register info
2619 * @obj: object to clear
2620 *
2621 * Zeroes out the fence register itself and clears out the associated
05394f39 2622 * data structures in dev_priv and obj.
de151cf6
JB
2623 */
2624static void
d9e86c0e
CW
2625i915_gem_clear_fence_reg(struct drm_device *dev,
2626 struct drm_i915_fence_reg *reg)
de151cf6 2627{
79e53945 2628 drm_i915_private_t *dev_priv = dev->dev_private;
d9e86c0e 2629 uint32_t fence_reg = reg - dev_priv->fence_regs;
de151cf6 2630
e259befd
CW
2631 switch (INTEL_INFO(dev)->gen) {
2632 case 6:
d9e86c0e 2633 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
e259befd
CW
2634 break;
2635 case 5:
2636 case 4:
d9e86c0e 2637 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
e259befd
CW
2638 break;
2639 case 3:
d9e86c0e
CW
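 /* Gen3: registers 8-15 live in the FENCE_REG_945_8 bank; note the
 * deliberate fall-through from the else branch into case 2 for the
 * low registers.
 */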
2640 if (fence_reg >= 8)
2641 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
dc529a4f 2642 else
e259befd 2643 case 2:
d9e86c0e 2644 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
dc529a4f
EA
2645
2646 I915_WRITE(fence_reg, 0);
e259befd 2647 break;
dc529a4f 2648 }
de151cf6 2649
007cc8ac 2650 list_del_init(&reg->lru_list);
d9e86c0e
CW
2651 reg->obj = NULL;
2652 reg->setup_seqno = 0;
52dc7d32
CW
2653}
2654
673a394b
EA
2655/**
2656 * Finds free space in the GTT aperture and binds the object there.
2657 */
2658static int
05394f39 2659i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
920afa77 2660 unsigned alignment,
75e9e915 2661 bool map_and_fenceable)
673a394b 2662{
05394f39 2663 struct drm_device *dev = obj->base.dev;
673a394b 2664 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 2665 struct drm_mm_node *free_space;
a00b10c3 2666 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
5e783301 2667 u32 size, fence_size, fence_alignment, unfenced_alignment;
75e9e915 2668 bool mappable, fenceable;
07f73f69 2669 int ret;
673a394b 2670
05394f39 2671 if (obj->madv != I915_MADV_WILLNEED) {
3ef94daa
CW
2672 DRM_ERROR("Attempting to bind a purgeable object\n");
2673 return -EINVAL;
2674 }
2675
05394f39
CW
2676 fence_size = i915_gem_get_gtt_size(obj);
2677 fence_alignment = i915_gem_get_gtt_alignment(obj);
2678 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
a00b10c3 2679
673a394b 2680 if (alignment == 0)
5e783301
DV
2681 alignment = map_and_fenceable ? fence_alignment :
2682 unfenced_alignment;
75e9e915 2683 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
673a394b
EA
2684 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2685 return -EINVAL;
2686 }
2687
05394f39 2688 size = map_and_fenceable ? fence_size : obj->base.size;
a00b10c3 2689
654fc607
CW
2690 /* If the object is bigger than the entire aperture, reject it early
2691 * before evicting everything in a vain attempt to find space.
2692 */
05394f39 2693 if (obj->base.size >
75e9e915 2694 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
654fc607
CW
2695 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2696 return -E2BIG;
2697 }
2698
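 /* Look for a GTT range large enough for the object; if none is found,
 * evict something and retry. Should allocating the backing pages fail,
 * evict further and, as a last resort, retry the allocation without the
 * __GFP_NORETRY | __GFP_NOWARN flags.
 */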
673a394b 2699 search_free:
75e9e915 2700 if (map_and_fenceable)
920afa77
DV
2701 free_space =
2702 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
a00b10c3 2703 size, alignment, 0,
920afa77
DV
2704 dev_priv->mm.gtt_mappable_end,
2705 0);
2706 else
2707 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
a00b10c3 2708 size, alignment, 0);
920afa77
DV
2709
2710 if (free_space != NULL) {
75e9e915 2711 if (map_and_fenceable)
05394f39 2712 obj->gtt_space =
920afa77 2713 drm_mm_get_block_range_generic(free_space,
a00b10c3 2714 size, alignment, 0,
920afa77
DV
2715 dev_priv->mm.gtt_mappable_end,
2716 0);
2717 else
05394f39 2718 obj->gtt_space =
a00b10c3 2719 drm_mm_get_block(free_space, size, alignment);
920afa77 2720 }
05394f39 2721 if (obj->gtt_space == NULL) {
673a394b
EA
2722 /* If the gtt is empty and we're still having trouble
2723 * fitting our object in, we're out of memory.
2724 */
75e9e915
DV
2725 ret = i915_gem_evict_something(dev, size, alignment,
2726 map_and_fenceable);
9731129c 2727 if (ret)
673a394b 2728 return ret;
9731129c 2729
673a394b
EA
2730 goto search_free;
2731 }
2732
e5281ccd 2733 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
673a394b 2734 if (ret) {
05394f39
CW
2735 drm_mm_put_block(obj->gtt_space);
2736 obj->gtt_space = NULL;
07f73f69
CW
2737
2738 if (ret == -ENOMEM) {
2739 /* first try to clear up some space from the GTT */
a00b10c3 2740 ret = i915_gem_evict_something(dev, size,
75e9e915
DV
2741 alignment,
2742 map_and_fenceable);
07f73f69 2743 if (ret) {
07f73f69 2744 /* now try to shrink everyone else */
4bdadb97
CW
2745 if (gfpmask) {
2746 gfpmask = 0;
2747 goto search_free;
07f73f69
CW
2748 }
2749
2750 return ret;
2751 }
2752
2753 goto search_free;
2754 }
2755
673a394b
EA
2756 return ret;
2757 }
2758
7c2e6fdf
DV
2759 ret = i915_gem_gtt_bind_object(obj);
2760 if (ret) {
e5281ccd 2761 i915_gem_object_put_pages_gtt(obj);
05394f39
CW
2762 drm_mm_put_block(obj->gtt_space);
2763 obj->gtt_space = NULL;
07f73f69 2764
a00b10c3 2765 ret = i915_gem_evict_something(dev, size,
75e9e915 2766 alignment, map_and_fenceable);
9731129c 2767 if (ret)
07f73f69 2768 return ret;
07f73f69
CW
2769
2770 goto search_free;
673a394b 2771 }
673a394b 2772
6299f992 2773 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
05394f39 2774 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
bf1a1092 2775
673a394b
EA
2776 /* Assert that the object is not currently in any GPU domain. As it
2777 * wasn't in the GTT, there shouldn't be any way it could have been in
2778 * a GPU cache
2779 */
05394f39
CW
2780 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2781 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
673a394b 2782
6299f992 2783 obj->gtt_offset = obj->gtt_space->start;
1c5d22f7 2784
75e9e915 2785 fenceable =
05394f39
CW
2786 obj->gtt_space->size == fence_size &&
 2787 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
a00b10c3 2788
75e9e915 2789 mappable =
05394f39 2790 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
a00b10c3 2791
05394f39 2792 obj->map_and_fenceable = mappable && fenceable;
75e9e915 2793
6299f992 2794 trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
673a394b
EA
2795 return 0;
2796}
2797
2798void
05394f39 2799i915_gem_clflush_object(struct drm_i915_gem_object *obj)
673a394b 2800{
673a394b
EA
2801 /* If we don't have a page list set up, then we're not pinned
2802 * to GPU, and we can ignore the cache flush because it'll happen
2803 * again at bind time.
2804 */
05394f39 2805 if (obj->pages == NULL)
673a394b
EA
2806 return;
2807
1c5d22f7 2808 trace_i915_gem_object_clflush(obj);
cfa16a0d 2809
05394f39 2810 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
673a394b
EA
2811}
2812
e47c68e9 2813/** Flushes any GPU write domain for the object if it's dirty. */
3619df03
CW
2814static void
2815i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 2816{
05394f39 2817 struct drm_device *dev = obj->base.dev;
e47c68e9 2818
05394f39 2819 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
3619df03 2820 return;
e47c68e9
EA
2821
2822 /* Queue the GPU write cache flushing we need. */
05394f39
CW
2823 i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
2824 BUG_ON(obj->base.write_domain);
e47c68e9
EA
2825}
2826
2827/** Flushes the GTT write domain for the object if it's dirty. */
2828static void
05394f39 2829i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 2830{
1c5d22f7
CW
2831 uint32_t old_write_domain;
2832
05394f39 2833 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
e47c68e9
EA
2834 return;
2835
2836 /* No actual flushing is required for the GTT write domain. Writes
2837 * to it immediately go to main memory as far as we know, so there's
2838 * no chipset flush. It also doesn't land in render cache.
2839 */
4a684a41
CW
2840 i915_gem_release_mmap(obj);
2841
05394f39
CW
2842 old_write_domain = obj->base.write_domain;
2843 obj->base.write_domain = 0;
1c5d22f7
CW
2844
2845 trace_i915_gem_object_change_domain(obj,
05394f39 2846 obj->base.read_domains,
1c5d22f7 2847 old_write_domain);
e47c68e9
EA
2848}
2849
2850/** Flushes the CPU write domain for the object if it's dirty. */
2851static void
05394f39 2852i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 2853{
1c5d22f7 2854 uint32_t old_write_domain;
e47c68e9 2855
05394f39 2856 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
e47c68e9
EA
2857 return;
2858
2859 i915_gem_clflush_object(obj);
40ce6575 2860 intel_gtt_chipset_flush();
05394f39
CW
2861 old_write_domain = obj->base.write_domain;
2862 obj->base.write_domain = 0;
1c5d22f7
CW
2863
2864 trace_i915_gem_object_change_domain(obj,
05394f39 2865 obj->base.read_domains,
1c5d22f7 2866 old_write_domain);
e47c68e9
EA
2867}
2868
2ef7eeaa
EA
2869/**
2870 * Moves a single object to the GTT read, and possibly write domain.
2871 *
2872 * This function returns when the move is complete, including waiting on
2873 * flushes to occur.
2874 */
79e53945 2875int
2021746e 2876i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 2877{
1c5d22f7 2878 uint32_t old_write_domain, old_read_domains;
e47c68e9 2879 int ret;
2ef7eeaa 2880
02354392 2881 /* Not valid to be called on unbound objects. */
05394f39 2882 if (obj->gtt_space == NULL)
02354392
EA
2883 return -EINVAL;
2884
3619df03 2885 i915_gem_object_flush_gpu_write_domain(obj);
87ca9c8a
CW
2886 if (obj->pending_gpu_write || write) {
2887 ret = i915_gem_object_wait_rendering(obj, true);
2888 if (ret)
2889 return ret;
2890 }
2dafb1e0 2891
7213342d 2892 i915_gem_object_flush_cpu_write_domain(obj);
1c5d22f7 2893
05394f39
CW
2894 old_write_domain = obj->base.write_domain;
2895 old_read_domains = obj->base.read_domains;
1c5d22f7 2896
e47c68e9
EA
2897 /* It should now be out of any other write domains, and we can update
2898 * the domain values for our changes.
2899 */
05394f39
CW
2900 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2901 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 2902 if (write) {
05394f39
CW
2903 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2904 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2905 obj->dirty = 1;
2ef7eeaa
EA
2906 }
2907
1c5d22f7
CW
2908 trace_i915_gem_object_change_domain(obj,
2909 old_read_domains,
2910 old_write_domain);
2911
e47c68e9
EA
2912 return 0;
2913}
2914
b9241ea3
ZW
2915/*
 2916 * Prepare buffer for display plane. Use an uninterruptible wait for any
 2917 * required flush, as the modesetting process must not be interrupted.
2918 */
2919int
05394f39 2920i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
919926ae 2921 struct intel_ring_buffer *pipelined)
b9241ea3 2922{
ba3d8d74 2923 uint32_t old_read_domains;
b9241ea3
ZW
2924 int ret;
2925
2926 /* Not valid to be called on unbound objects. */
05394f39 2927 if (obj->gtt_space == NULL)
b9241ea3
ZW
2928 return -EINVAL;
2929
3619df03 2930 i915_gem_object_flush_gpu_write_domain(obj);
b9241ea3 2931
 ced270fa 2932 /* Currently, we are always called from a non-interruptible context. */
0be73284 2933 if (pipelined != obj->ring) {
ced270fa
CW
2934 ret = i915_gem_object_wait_rendering(obj, false);
2935 if (ret)
b9241ea3
ZW
2936 return ret;
2937 }
2938
b118c1e3
CW
2939 i915_gem_object_flush_cpu_write_domain(obj);
2940
05394f39
CW
2941 old_read_domains = obj->base.read_domains;
2942 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3
ZW
2943
2944 trace_i915_gem_object_change_domain(obj,
2945 old_read_domains,
05394f39 2946 obj->base.write_domain);
b9241ea3
ZW
2947
2948 return 0;
2949}
2950
85345517
CW
2951int
2952i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
2953 bool interruptible)
2954{
2955 if (!obj->active)
2956 return 0;
2957
2958 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
05394f39 2959 i915_gem_flush_ring(obj->base.dev, obj->ring,
85345517
CW
2960 0, obj->base.write_domain);
2961
05394f39 2962 return i915_gem_object_wait_rendering(obj, interruptible);
85345517
CW
2963}
2964
e47c68e9
EA
2965/**
2966 * Moves a single object to the CPU read, and possibly write domain.
2967 *
2968 * This function returns when the move is complete, including waiting on
2969 * flushes to occur.
2970 */
2971static int
919926ae 2972i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
e47c68e9 2973{
1c5d22f7 2974 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
2975 int ret;
2976
3619df03 2977 i915_gem_object_flush_gpu_write_domain(obj);
de18a29e
DV
2978 ret = i915_gem_object_wait_rendering(obj, true);
2979 if (ret)
e47c68e9 2980 return ret;
2ef7eeaa 2981
e47c68e9 2982 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 2983
e47c68e9
EA
2984 /* If we have a partially-valid cache of the object in the CPU,
2985 * finish invalidating it and free the per-page flags.
2ef7eeaa 2986 */
e47c68e9 2987 i915_gem_object_set_to_full_cpu_read_domain(obj);
2ef7eeaa 2988
05394f39
CW
2989 old_write_domain = obj->base.write_domain;
2990 old_read_domains = obj->base.read_domains;
1c5d22f7 2991
e47c68e9 2992 /* Flush the CPU cache if it's still invalid. */
05394f39 2993 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 2994 i915_gem_clflush_object(obj);
2ef7eeaa 2995
05394f39 2996 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
2997 }
2998
2999 /* It should now be out of any other write domains, and we can update
3000 * the domain values for our changes.
3001 */
05394f39 3002 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
e47c68e9
EA
3003
3004 /* If we're writing through the CPU, then the GPU read domains will
3005 * need to be invalidated at next use.
3006 */
3007 if (write) {
05394f39
CW
3008 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3009 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
e47c68e9 3010 }
2ef7eeaa 3011
1c5d22f7
CW
3012 trace_i915_gem_object_change_domain(obj,
3013 old_read_domains,
3014 old_write_domain);
3015
2ef7eeaa
EA
3016 return 0;
3017}
3018
673a394b 3019/**
 e47c68e9 3020 * Moves the object from a partially valid CPU read domain to a fully valid one.
673a394b 3021 *
e47c68e9
EA
3022 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3023 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
673a394b 3024 */
e47c68e9 3025static void
05394f39 3026i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
673a394b 3027{
05394f39 3028 if (!obj->page_cpu_valid)
e47c68e9
EA
3029 return;
3030
3031 /* If we're partially in the CPU read domain, finish moving it in.
3032 */
05394f39 3033 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
e47c68e9
EA
3034 int i;
3035
05394f39
CW
3036 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3037 if (obj->page_cpu_valid[i])
e47c68e9 3038 continue;
05394f39 3039 drm_clflush_pages(obj->pages + i, 1);
e47c68e9 3040 }
e47c68e9
EA
3041 }
3042
3043 /* Free the page_cpu_valid mappings which are now stale, whether
3044 * or not we've got I915_GEM_DOMAIN_CPU.
3045 */
05394f39
CW
3046 kfree(obj->page_cpu_valid);
3047 obj->page_cpu_valid = NULL;
e47c68e9
EA
3048}
3049
3050/**
3051 * Set the CPU read domain on a range of the object.
3052 *
3053 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3054 * not entirely valid. The page_cpu_valid member of the object flags which
3055 * pages have been flushed, and will be respected by
3056 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3057 * of the whole object.
3058 *
3059 * This function returns when the move is complete, including waiting on
3060 * flushes to occur.
3061 */
3062static int
05394f39 3063i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
e47c68e9
EA
3064 uint64_t offset, uint64_t size)
3065{
1c5d22f7 3066 uint32_t old_read_domains;
e47c68e9 3067 int i, ret;
673a394b 3068
05394f39 3069 if (offset == 0 && size == obj->base.size)
e47c68e9 3070 return i915_gem_object_set_to_cpu_domain(obj, 0);
673a394b 3071
3619df03 3072 i915_gem_object_flush_gpu_write_domain(obj);
de18a29e
DV
3073 ret = i915_gem_object_wait_rendering(obj, true);
3074 if (ret)
6a47baa6 3075 return ret;
de18a29e 3076
e47c68e9
EA
3077 i915_gem_object_flush_gtt_write_domain(obj);
3078
3079 /* If we're already fully in the CPU read domain, we're done. */
05394f39
CW
3080 if (obj->page_cpu_valid == NULL &&
3081 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
e47c68e9 3082 return 0;
673a394b 3083
e47c68e9
EA
3084 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3085 * newly adding I915_GEM_DOMAIN_CPU
3086 */
05394f39
CW
3087 if (obj->page_cpu_valid == NULL) {
3088 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3089 GFP_KERNEL);
3090 if (obj->page_cpu_valid == NULL)
e47c68e9 3091 return -ENOMEM;
05394f39
CW
3092 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3093 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
673a394b
EA
3094
3095 /* Flush the cache on any pages that are still invalid from the CPU's
3096 * perspective.
3097 */
e47c68e9
EA
3098 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3099 i++) {
05394f39 3100 if (obj->page_cpu_valid[i])
673a394b
EA
3101 continue;
3102
05394f39 3103 drm_clflush_pages(obj->pages + i, 1);
673a394b 3104
05394f39 3105 obj->page_cpu_valid[i] = 1;
673a394b
EA
3106 }
3107
e47c68e9
EA
3108 /* It should now be out of any other write domains, and we can update
3109 * the domain values for our changes.
3110 */
05394f39 3111 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
e47c68e9 3112
05394f39
CW
3113 old_read_domains = obj->base.read_domains;
3114 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
e47c68e9 3115
1c5d22f7
CW
3116 trace_i915_gem_object_change_domain(obj,
3117 old_read_domains,
05394f39 3118 obj->base.write_domain);
1c5d22f7 3119
673a394b
EA
3120 return 0;
3121}
3122
673a394b
EA
3123/* Throttle our rendering by waiting until the ring has completed our requests
3124 * emitted over 20 msec ago.
3125 *
b962442e
EA
3126 * Note that if we were to use the current jiffies each time around the loop,
3127 * we wouldn't escape the function with any frames outstanding if the time to
3128 * render a frame was over 20ms.
3129 *
673a394b
EA
3130 * This should get us reasonable parallelism between CPU and GPU but also
3131 * relatively low latency when blocking on a particular request to finish.
3132 */
40a5f0de 3133static int
f787a5f5 3134i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3135{
f787a5f5
CW
3136 struct drm_i915_private *dev_priv = dev->dev_private;
3137 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 3138 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
f787a5f5
CW
3139 struct drm_i915_gem_request *request;
3140 struct intel_ring_buffer *ring = NULL;
3141 u32 seqno = 0;
3142 int ret;
93533c29 3143
1c25595f 3144 spin_lock(&file_priv->mm.lock);
f787a5f5 3145 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
b962442e
EA
3146 if (time_after_eq(request->emitted_jiffies, recent_enough))
3147 break;
40a5f0de 3148
f787a5f5
CW
3149 ring = request->ring;
3150 seqno = request->seqno;
b962442e 3151 }
1c25595f 3152 spin_unlock(&file_priv->mm.lock);
40a5f0de 3153
f787a5f5
CW
3154 if (seqno == 0)
3155 return 0;
2bc43b5c 3156
f787a5f5 3157 ret = 0;
78501eac 3158 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
f787a5f5
CW
3159 /* And wait for the seqno passing without holding any locks and
3160 * causing extra latency for others. This is safe as the irq
3161 * generation is designed to be run atomically and so is
3162 * lockless.
3163 */
b13c2b96
CW
3164 if (ring->irq_get(ring)) {
3165 ret = wait_event_interruptible(ring->irq_queue,
3166 i915_seqno_passed(ring->get_seqno(ring), seqno)
3167 || atomic_read(&dev_priv->mm.wedged));
3168 ring->irq_put(ring);
40a5f0de 3169
b13c2b96
CW
3170 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3171 ret = -EIO;
3172 }
40a5f0de
EA
3173 }
3174
f787a5f5
CW
3175 if (ret == 0)
3176 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
40a5f0de
EA
3177
3178 return ret;
3179}
3180
673a394b 3181int
05394f39
CW
3182i915_gem_object_pin(struct drm_i915_gem_object *obj,
3183 uint32_t alignment,
75e9e915 3184 bool map_and_fenceable)
673a394b 3185{
05394f39 3186 struct drm_device *dev = obj->base.dev;
f13d3f73 3187 struct drm_i915_private *dev_priv = dev->dev_private;
673a394b
EA
3188 int ret;
3189
05394f39 3190 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
23bc5982 3191 WARN_ON(i915_verify_lists(dev));
ac0c6b5a 3192
05394f39
CW
3193 if (obj->gtt_space != NULL) {
3194 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3195 (map_and_fenceable && !obj->map_and_fenceable)) {
3196 WARN(obj->pin_count,
ae7d49d8 3197 "bo is already pinned with incorrect alignment:"
75e9e915
DV
3198 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3199 " obj->map_and_fenceable=%d\n",
05394f39 3200 obj->gtt_offset, alignment,
75e9e915 3201 map_and_fenceable,
05394f39 3202 obj->map_and_fenceable);
ac0c6b5a
CW
3203 ret = i915_gem_object_unbind(obj);
3204 if (ret)
3205 return ret;
3206 }
3207 }
3208
05394f39 3209 if (obj->gtt_space == NULL) {
a00b10c3 3210 ret = i915_gem_object_bind_to_gtt(obj, alignment,
75e9e915 3211 map_and_fenceable);
9731129c 3212 if (ret)
673a394b 3213 return ret;
22c344e9 3214 }
76446cac 3215
05394f39 3216 if (obj->pin_count++ == 0) {
05394f39
CW
3217 if (!obj->active)
3218 list_move_tail(&obj->mm_list,
f13d3f73 3219 &dev_priv->mm.pinned_list);
673a394b 3220 }
6299f992 3221 obj->pin_mappable |= map_and_fenceable;
673a394b 3222
23bc5982 3223 WARN_ON(i915_verify_lists(dev));
673a394b
EA
3224 return 0;
3225}
3226
3227void
05394f39 3228i915_gem_object_unpin(struct drm_i915_gem_object *obj)
673a394b 3229{
05394f39 3230 struct drm_device *dev = obj->base.dev;
673a394b 3231 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 3232
23bc5982 3233 WARN_ON(i915_verify_lists(dev));
05394f39
CW
3234 BUG_ON(obj->pin_count == 0);
3235 BUG_ON(obj->gtt_space == NULL);
673a394b 3236
05394f39
CW
3237 if (--obj->pin_count == 0) {
3238 if (!obj->active)
3239 list_move_tail(&obj->mm_list,
673a394b 3240 &dev_priv->mm.inactive_list);
6299f992 3241 obj->pin_mappable = false;
673a394b 3242 }
23bc5982 3243 WARN_ON(i915_verify_lists(dev));
673a394b
EA
3244}
3245
3246int
3247i915_gem_pin_ioctl(struct drm_device *dev, void *data,
05394f39 3248 struct drm_file *file)
673a394b
EA
3249{
3250 struct drm_i915_gem_pin *args = data;
05394f39 3251 struct drm_i915_gem_object *obj;
673a394b
EA
3252 int ret;
3253
1d7cfea1
CW
3254 ret = i915_mutex_lock_interruptible(dev);
3255 if (ret)
3256 return ret;
673a394b 3257
05394f39 3258 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
673a394b 3259 if (obj == NULL) {
1d7cfea1
CW
3260 ret = -ENOENT;
3261 goto unlock;
673a394b 3262 }
673a394b 3263
05394f39 3264 if (obj->madv != I915_MADV_WILLNEED) {
bb6baf76 3265 DRM_ERROR("Attempting to pin a purgeable buffer\n");
1d7cfea1
CW
3266 ret = -EINVAL;
3267 goto out;
3ef94daa
CW
3268 }
3269
05394f39 3270 if (obj->pin_filp != NULL && obj->pin_filp != file) {
79e53945
JB
3271 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3272 args->handle);
1d7cfea1
CW
3273 ret = -EINVAL;
3274 goto out;
79e53945
JB
3275 }
3276
05394f39
CW
3277 obj->user_pin_count++;
3278 obj->pin_filp = file;
3279 if (obj->user_pin_count == 1) {
75e9e915 3280 ret = i915_gem_object_pin(obj, args->alignment, true);
1d7cfea1
CW
3281 if (ret)
3282 goto out;
673a394b
EA
3283 }
3284
3285 /* XXX - flush the CPU caches for pinned objects
3286 * as the X server doesn't manage domains yet
3287 */
e47c68e9 3288 i915_gem_object_flush_cpu_write_domain(obj);
05394f39 3289 args->offset = obj->gtt_offset;
1d7cfea1 3290out:
05394f39 3291 drm_gem_object_unreference(&obj->base);
1d7cfea1 3292unlock:
673a394b 3293 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3294 return ret;
673a394b
EA
3295}
3296
3297int
3298i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
05394f39 3299 struct drm_file *file)
673a394b
EA
3300{
3301 struct drm_i915_gem_pin *args = data;
05394f39 3302 struct drm_i915_gem_object *obj;
76c1dec1 3303 int ret;
673a394b 3304
1d7cfea1
CW
3305 ret = i915_mutex_lock_interruptible(dev);
3306 if (ret)
3307 return ret;
673a394b 3308
05394f39 3309 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
673a394b 3310 if (obj == NULL) {
1d7cfea1
CW
3311 ret = -ENOENT;
3312 goto unlock;
673a394b 3313 }
76c1dec1 3314
05394f39 3315 if (obj->pin_filp != file) {
79e53945
JB
3316 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3317 args->handle);
1d7cfea1
CW
3318 ret = -EINVAL;
3319 goto out;
79e53945 3320 }
05394f39
CW
3321 obj->user_pin_count--;
3322 if (obj->user_pin_count == 0) {
3323 obj->pin_filp = NULL;
79e53945
JB
3324 i915_gem_object_unpin(obj);
3325 }
673a394b 3326
1d7cfea1 3327out:
05394f39 3328 drm_gem_object_unreference(&obj->base);
1d7cfea1 3329unlock:
673a394b 3330 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3331 return ret;
673a394b
EA
3332}
3333
3334int
3335i915_gem_busy_ioctl(struct drm_device *dev, void *data,
05394f39 3336 struct drm_file *file)
673a394b
EA
3337{
3338 struct drm_i915_gem_busy *args = data;
05394f39 3339 struct drm_i915_gem_object *obj;
30dbf0c0
CW
3340 int ret;
3341
76c1dec1 3342 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 3343 if (ret)
76c1dec1 3344 return ret;
673a394b 3345
05394f39 3346 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
673a394b 3347 if (obj == NULL) {
1d7cfea1
CW
3348 ret = -ENOENT;
3349 goto unlock;
673a394b 3350 }
d1b851fc 3351
0be555b6
CW
3352 /* Count all active objects as busy, even if they are currently not used
3353 * by the gpu. Users of this interface expect objects to eventually
3354 * become non-busy without any further actions, therefore emit any
3355 * necessary flushes here.
c4de0a5d 3356 */
05394f39 3357 args->busy = obj->active;
0be555b6
CW
3358 if (args->busy) {
3359 /* Unconditionally flush objects, even when the gpu still uses this
3360 * object. Userspace calling this function indicates that it wants to
3361 * use this buffer rather sooner than later, so issuing the required
3362 * flush earlier is beneficial.
3363 */
1a1c6976 3364 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
05394f39
CW
3365 i915_gem_flush_ring(dev, obj->ring,
3366 0, obj->base.write_domain);
1a1c6976
CW
3367 } else if (obj->ring->outstanding_lazy_request ==
3368 obj->last_rendering_seqno) {
3369 struct drm_i915_gem_request *request;
3370
7a194876
CW
3371 /* This ring is not being cleared by active usage,
3372 * so emit a request to do so.
3373 */
1a1c6976
CW
3374 request = kzalloc(sizeof(*request), GFP_KERNEL);
3375 if (request)
3376 ret = i915_add_request(dev,
3377 NULL, request,
3378 obj->ring);
3379 else
7a194876
CW
3380 ret = -ENOMEM;
3381 }
0be555b6
CW
3382
3383 /* Update the active list for the hardware's current position.
3384 * Otherwise this only updates on a delayed timer or when irqs
3385 * are actually unmasked, and our working set ends up being
3386 * larger than required.
3387 */
05394f39 3388 i915_gem_retire_requests_ring(dev, obj->ring);
0be555b6 3389
05394f39 3390 args->busy = obj->active;
0be555b6 3391 }
673a394b 3392
05394f39 3393 drm_gem_object_unreference(&obj->base);
1d7cfea1 3394unlock:
673a394b 3395 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3396 return ret;
673a394b
EA
3397}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        return i915_gem_ring_throttle(dev, file_priv);
}
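
/*
 * Editorial note (hedged): i915_gem_ring_throttle(), defined earlier in this
 * file, bounds how much rendering a single client may have outstanding by
 * waiting on some of that client's oldest unretired requests, so one process
 * cannot queue work arbitrarily far ahead of the GPU.
 */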

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        if (obj->pin_count) {
                ret = -EINVAL;
                goto out;
        }

        if (obj->madv != __I915_MADV_PURGED)
                obj->madv = args->madv;

        /* if the object is no longer bound, discard its backing storage */
        if (i915_gem_object_is_purgeable(obj) &&
            obj->gtt_space == NULL)
                i915_gem_object_truncate(obj);

        args->retained = obj->madv != __I915_MADV_PURGED;

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
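
/*
 * Usage note (illustrative sketch, not part of the driver): a userspace
 * buffer cache can mark idle buffers purgeable and must check 'retained'
 * before reusing them, since the kernel may have discarded the pages:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = I915_MADV_DONTNEED,	// I915_MADV_WILLNEED on reuse
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reinitialise_buffer(bo_handle);	// purged: hypothetical helper
 *
 * Field and ioctl names follow the uapi headers; the cache logic around them
 * is an assumption for illustration only.
 */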

struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        if (drm_gem_object_init(dev, &obj->base, size) != 0) {
                kfree(obj);
                return NULL;
        }

        i915_gem_info_add_obj(dev_priv, size);

        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        obj->agp_type = AGP_USER_MEMORY;
        obj->base.driver_private = NULL;
        obj->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj->mm_list);
        INIT_LIST_HEAD(&obj->gtt_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
        INIT_LIST_HEAD(&obj->gpu_write_list);
        obj->madv = I915_MADV_WILLNEED;
        /* Avoid an unnecessary call to unbind on the first bind. */
        obj->map_and_fenceable = true;

        return obj;
}
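
/*
 * Sketch (assumption, not verbatim from this file): the GEM create ioctl is
 * the usual caller of i915_gem_alloc_object(); it wraps the fresh object in
 * a userspace handle roughly like so:
 *
 *	obj = i915_gem_alloc_object(dev, args->size);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_handle_create(file_priv, &obj->base, &handle);
 *	drm_gem_object_unreference(&obj->base);	// handle now holds a reference
 *
 * The object starts unbound and in the CPU read/write domain, as set up
 * above.
 */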

int i915_gem_init_object(struct drm_gem_object *obj)
{
        BUG();

        return 0;
}

static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = i915_gem_object_unbind(obj);
        if (ret == -ERESTARTSYS) {
                list_move(&obj->mm_list,
                          &dev_priv->mm.deferred_free_list);
                return;
        }

        if (obj->base.map_list.map)
                i915_gem_free_mmap_offset(obj);

        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev_priv, obj->base.size);

        kfree(obj->page_cpu_valid);
        kfree(obj->bit_17);
        kfree(obj);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;

        trace_i915_gem_object_destroy(obj);

        while (obj->pin_count > 0)
                i915_gem_object_unpin(obj);

        if (obj->phys_obj)
                i915_gem_detach_phys_object(dev, obj);

        i915_gem_free_object_tail(obj);
}
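
/*
 * Note (assumption about the surrounding code): when unbind is interrupted
 * by a signal (-ERESTARTSYS), i915_gem_free_object_tail() parks the object
 * on dev_priv->mm.deferred_free_list above; the retire-requests path is
 * expected to retry the teardown for those objects later, so the memory is
 * not leaked.
 */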

int
i915_gem_idle(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        mutex_lock(&dev->struct_mutex);

        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }

        ret = i915_gpu_idle(dev);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* Under UMS, be paranoid and evict. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_gem_evict_inactive(dev, false);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
        }

        i915_gem_reset_fences(dev);

        /* Hack!  Don't let anybody do execbuf while we don't control the chip.
         * We need to replace this with a semaphore, or something.
         * And not confound mm.suspended!
         */
        dev_priv->mm.suspended = 1;
        del_timer_sync(&dev_priv->hangcheck_timer);

        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);

        mutex_unlock(&dev->struct_mutex);

        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);

        return 0;
}
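
/*
 * i915_gem_idle() is the quiesce path used by i915_gem_leavevt_ioctl() and
 * i915_gem_lastclose() below (and, it is assumed, by the suspend path in
 * i915_drv.c): it waits for the GPU, evicts under UMS, tears down the rings
 * and blocks further execbuf via mm.suspended.
 */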

int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;

        if (HAS_BSD(dev)) {
                ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }

        if (HAS_BLT(dev)) {
                ret = intel_init_blt_ring_buffer(dev);
                if (ret)
                        goto cleanup_bsd_ring;
        }

        dev_priv->next_seqno = 1;

        return 0;

cleanup_bsd_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
        return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
                atomic_set(&dev_priv->mm.wedged, 0);
        }

        mutex_lock(&dev->struct_mutex);
        dev_priv->mm.suspended = 0;

        ret = i915_gem_init_ringbuffer(dev);
        if (ret != 0) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        BUG_ON(!list_empty(&dev_priv->mm.active_list));
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        for (i = 0; i < I915_NUM_RINGS; i++) {
                BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
                BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
        }
        mutex_unlock(&dev->struct_mutex);

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_ringbuffer;

        return 0;

cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        dev_priv->mm.suspended = 1;
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        drm_irq_uninstall(dev);
        return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ret = i915_gem_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);
}

void
i915_gem_load(struct drm_device *dev)
{
        int i;
        drm_i915_private_t *dev_priv = dev->dev_private;

        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
        INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
        for (i = 0; i < I915_NUM_RINGS; i++)
                init_ring_lists(&dev_priv->ring[i]);
        for (i = 0; i < 16; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        init_completion(&dev_priv->error_completion);

        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
        if (IS_GEN3(dev)) {
                u32 tmp = I915_READ(MI_ARB_STATE);
                if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
                        /* arb state is a masked write, so set bit + bit in mask */
                        tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
                        I915_WRITE(MI_ARB_STATE, tmp);
                }
        }

        /* Old X drivers will take 0-2 for front, back, depth buffers */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;

        if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;

        /* Initialize fence registers to zero */
        switch (INTEL_INFO(dev)->gen) {
        case 6:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
                break;
        case 5:
        case 4:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
                break;
        case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
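                /* Intentional fall-through: every gen3 part also has the
                 * eight 830-style fence registers cleared by case 2 below;
                 * the 945-class parts handled above have eight more on top.
                 */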
        case 2:
                for (i = 0; i < 8; i++)
                        I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
                break;
        }
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);

        dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
        dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
                                     int id, int size, int align)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
        int ret;

        if (dev_priv->mm.phys_objs[id - 1] || !size)
                return 0;

        phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
        if (!phys_obj)
                return -ENOMEM;

        phys_obj->id = id;

        phys_obj->handle = drm_pci_alloc(dev, size, align);
        if (!phys_obj->handle) {
                ret = -ENOMEM;
                goto kfree_obj;
        }
#ifdef CONFIG_X86
        set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

        dev_priv->mm.phys_objs[id - 1] = phys_obj;

        return 0;
kfree_obj:
        kfree(phys_obj);
        return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;

        if (!dev_priv->mm.phys_objs[id - 1])
                return;

        phys_obj = dev_priv->mm.phys_objs[id - 1];
        if (phys_obj->cur_obj) {
                i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
        }

#ifdef CONFIG_X86
        set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
        drm_pci_free(dev, phys_obj->handle);
        kfree(phys_obj);
        dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
        int i;

        for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
                i915_gem_free_phys_object(dev, i);
}

void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        char *vaddr;
        int i;
        int page_count;

        if (!obj->phys_obj)
                return;
        vaddr = obj->phys_obj->handle->vaddr;

        page_count = obj->base.size / PAGE_SIZE;
        for (i = 0; i < page_count; i++) {
                struct page *page = read_cache_page_gfp(mapping, i,
                                                        GFP_HIGHUSER | __GFP_RECLAIMABLE);
                if (!IS_ERR(page)) {
                        char *dst = kmap_atomic(page);
                        memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
                        kunmap_atomic(dst);

                        drm_clflush_pages(&page, 1);

                        set_page_dirty(page);
                        mark_page_accessed(page);
                        page_cache_release(page);
                }
        }
        intel_gtt_chipset_flush();

        obj->phys_obj->cur_obj = NULL;
        obj->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
                            struct drm_i915_gem_object *obj,
                            int id,
                            int align)
{
        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = 0;
        int page_count;
        int i;

        if (id > I915_MAX_PHYS_OBJECT)
                return -EINVAL;

        if (obj->phys_obj) {
                if (obj->phys_obj->id == id)
                        return 0;
                i915_gem_detach_phys_object(dev, obj);
        }

        /* create a new object */
        if (!dev_priv->mm.phys_objs[id - 1]) {
                ret = i915_gem_init_phys_object(dev, id,
                                                obj->base.size, align);
                if (ret) {
                        DRM_ERROR("failed to init phys object %d size: %zu\n",
                                  id, obj->base.size);
                        return ret;
                }
        }

        /* bind to the object */
        obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj->phys_obj->cur_obj = obj;

        page_count = obj->base.size / PAGE_SIZE;

        for (i = 0; i < page_count; i++) {
                struct page *page;
                char *dst, *src;

                page = read_cache_page_gfp(mapping, i,
                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                src = kmap_atomic(page);
                dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(src);

                mark_page_accessed(page);
                page_cache_release(page);
        }

        return 0;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
{
        void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
        char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;

                /* The physical object once assigned is fixed for the lifetime
                 * of the obj, so we can safely drop the lock and continue
                 * to access vaddr.
                 */
                mutex_unlock(&dev->struct_mutex);
                unwritten = copy_from_user(vaddr, user_data, args->size);
                mutex_lock(&dev->struct_mutex);
                if (unwritten)
                        return -EFAULT;
        }

        intel_gtt_chipset_flush();
        return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
        while (!list_empty(&file_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&file_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           client_list);
                list_del(&request->client_list);
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
}

static int
i915_gpu_is_active(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int lists_empty;

        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
                      list_empty(&dev_priv->mm.active_list);

        return !lists_empty;
}
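
/*
 * Shrinker contract note (descriptive, added for clarity): the VM calls
 * i915_gem_inactive_shrink() below with nr_to_scan == 0 merely to ask how
 * many objects could be freed, and with a non-zero count to actually unbind
 * purgeable and then other inactive buffers; the return value is scaled by
 * sysctl_vfs_cache_pressure as the core shrinker code expects.
 */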

static int
i915_gem_inactive_shrink(struct shrinker *shrinker,
                         int nr_to_scan,
                         gfp_t gfp_mask)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker,
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj, *next;
        int cnt;

        if (!mutex_trylock(&dev->struct_mutex))
                return 0;

        /* "fast-path" to count number of available objects */
        if (nr_to_scan == 0) {
                cnt = 0;
                list_for_each_entry(obj,
                                    &dev_priv->mm.inactive_list,
                                    mm_list)
                        cnt++;
                mutex_unlock(&dev->struct_mutex);
                return cnt / 100 * sysctl_vfs_cache_pressure;
        }

rescan:
        /* first scan for clean buffers */
        i915_gem_retire_requests(dev);

        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list,
                                 mm_list) {
                if (i915_gem_object_is_purgeable(obj)) {
                        if (i915_gem_object_unbind(obj) == 0 &&
                            --nr_to_scan == 0)
                                break;
                }
        }

        /* second pass, evict/count anything still on the inactive list */
        cnt = 0;
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list,
                                 mm_list) {
                if (nr_to_scan &&
                    i915_gem_object_unbind(obj) == 0)
                        nr_to_scan--;
                else
                        cnt++;
        }

        if (nr_to_scan && i915_gpu_is_active(dev)) {
                /*
                 * We are desperate for pages, so as a last resort, wait
                 * for the GPU to finish and discard whatever we can.
                 * This dramatically reduces the number of OOM-killer
                 * events whilst running the GPU aggressively.
                 */
                if (i915_gpu_idle(dev) == 0)
                        goto rescan;
        }
        mutex_unlock(&dev->struct_mutex);
        return cnt / 100 * sysctl_vfs_cache_pressure;
}