drm/i915: Track unbound pages
drivers/gpu/drm/i915/i915_gem.c (LineageOS/android_kernel_motorola_exynos9610)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

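/*
 * Illustrative sketch (not part of the original file): a userspace caller
 * would exercise the create ioctl above roughly like this, using libdrm's
 * drmIoctl() wrapper; the handle value and object size here are arbitrary:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);	// names a 4 KiB GEM object
 *
 * Note that i915_gem_create() rounds the requested size up to a page
 * multiple before allocating the backing shmem file.
 */
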
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

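/*
 * Note (added for clarity): on machines whose bit-6 swizzle mixes in bit 17
 * of the physical address, setting bit 17 flips bit 6, which swaps each
 * 64-byte cacheline with its neighbour inside the tile. The helpers below
 * therefore walk the copy one cacheline at a time and XOR the GPU offset
 * with 64 to reach the swizzled partner line.
 */
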
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret;
}

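/*
 * Note (added for clarity): i915_gem_shmem_pread() below first attempts the
 * atomic per-page fast path while holding struct_mutex. If that copy would
 * fault (or the page is bit-17 swizzled), it drops the mutex, prefaults the
 * whole user buffer once, runs the sleeping slow path, and then retakes the
 * lock before moving to the next page.
 */
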
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int prefaulted = 0;
	int needs_clflush = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		if (obj->gtt_space) {
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
			if (ret)
				return ret;
		}
	}

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		if (!prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
	}

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret;
}

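/*
 * Note (added for clarity): i915_gem_shmem_pwrite() mirrors the pread path
 * above, with one extra wrinkle: when a copy does not overwrite a whole
 * cacheline on a non-LLC object, the destination line is clflushed first
 * (partial_cacheline_write) so stale data is not written back around the
 * freshly written bytes.
 */
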
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		if (obj->gtt_space) {
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
			if (ret)
				return ret;
		}
	}
	/* Same trick applies to invalidating partially written cachelines
	 * before writing. */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		int partial_cacheline_write;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		set_page_dirty(page);
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
		/* and flush dirty cachelines in case the object isn't in the cpu write
		 * domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			intel_gtt_chipset_flush();
		}
	}

	if (needs_clflush_after)
		intel_gtt_chipset_flush();

	return ret;
}

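/*
 * Note (added for clarity): the pwrite ioctl below picks one of three write
 * paths: i915_gem_phys_pwrite() for objects with a physical backing
 * (obj->phys_obj), i915_gem_gtt_pwrite_fast() for untiled, uncached,
 * mappable objects not in the CPU write domain, and the shmem path as the
 * universal fallback whenever the fast path returns -EFAULT.
 */
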
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
					   args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->map_and_fenceable &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (!obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (!atomic_read(&dev_priv->mm.wedged))
			return VM_FAULT_SIGBUS;
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);

	obj->fault_mappable = false;
}

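/*
 * Note (added for clarity): on gen2/3 a fenced region must be a power of
 * two, so i915_gem_get_gtt_size() below rounds up. For example, a 1.5 MiB
 * tiled object on gen3 starts from the 1 MiB minimum and doubles once,
 * occupying a 2 MiB fence region.
 */
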
static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev,
			   uint32_t size,
			   int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *					 unfenced object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
				    uint32_t size,
				    int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size) for sane hw.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/* Previous hardware however needs to be aligned to a power-of-two
	 * tile height. The simplest method for determining this is to reuse
	 * the power-of-two tiled object size.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&obj->base);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

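/*
 * Illustrative sketch (not part of the original file): userspace consumes
 * the fake offset in two steps, here shown with libdrm's drmIoctl();
 * handle and size are assumed to come from an earlier create call:
 *
 *	struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mmap_arg.offset);
 *
 * Subsequent page faults on ptr are then serviced by i915_gem_fault()
 * above.
 */
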
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	inode = obj->base.filp->f_path.dentry->d_inode;
	shmem_truncate_range(inode, 0, (loff_t)-1);

	if (obj->base.map_list.map)
		drm_gem_free_mmap_offset(&obj->base);

	obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

static int
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret, i;

	if (obj->pages == NULL)
		return 0;

	BUG_ON(obj->gtt_space);
	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj->dirty)
			set_page_dirty(obj->pages[i]);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj->pages[i]);

		page_cache_release(obj->pages[i]);
	}
	obj->dirty = 0;

	drm_free_large(obj->pages);
	obj->pages = NULL;

	list_del(&obj->gtt_list);

	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

	return 0;
}

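/*
 * Note (added for clarity): the purge below walks the unbound list first,
 * since those objects can drop their backing pages without a GTT unbind,
 * and only then turns to inactive (bound) objects, unbinding each one
 * before releasing its pages. This matches this commit's split of objects
 * into bound and unbound lists.
 */
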
static long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	struct drm_i915_gem_object *obj, *next;
	long count = 0;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.unbound_list,
				 gtt_list) {
		if (i915_gem_object_is_purgeable(obj) &&
		    i915_gem_object_put_pages_gtt(obj) == 0) {
			count += obj->base.size >> PAGE_SHIFT;
			if (count >= target)
				return count;
		}
	}

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (i915_gem_object_is_purgeable(obj) &&
		    i915_gem_object_unbind(obj) == 0 &&
		    i915_gem_object_put_pages_gtt(obj) == 0) {
			count += obj->base.size >> PAGE_SHIFT;
			if (count >= target)
				return count;
		}
	}

	return count;
}

static void
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *next;

	i915_gem_evict_everything(dev_priv->dev);

	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
		i915_gem_object_put_pages_gtt(obj);
}

int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct page *page;
	gfp_t gfp;

	if (obj->pages || obj->sg_table)
		return 0;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->base.size / PAGE_SIZE;
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	/* Fail silently without starting the shrinker */
	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
			gfp |= __GFP_IO | __GFP_WAIT;

			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (IS_ERR(page))
				goto err_pages;

			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
			gfp &= ~(__GFP_IO | __GFP_WAIT);
		}

		obj->pages[i] = page;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
	return 0;

err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

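/*
 * Note (added for clarity): moving an object to the active list takes its
 * own reference, so the object cannot vanish while the GPU may still be
 * reading from or writing to it; the matching unreference happens in
 * i915_gem_object_move_to_inactive() once the request is retired.
 */
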
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring,
			       u32 seqno)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring == NULL);
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

	if (obj->pin_count) /* are we a framebuffer? */
		intel_mark_fb_idle(obj);

	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	list_del_init(&obj->ring_list);
	obj->ring = NULL;

	obj->last_read_seqno = 0;
	obj->last_write_seqno = 0;
	obj->base.write_domain = 0;

	obj->last_fenced_seqno = 0;
	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

static u32
i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
	if (ring->outstanding_lazy_request == 0)
		ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);

	return ring->outstanding_lazy_request;
}

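/*
 * Note (added for clarity): seqnos are handed out lazily. The value
 * reserved by i915_gem_next_request_seqno() is only consumed once
 * i915_add_request() below emits it to the ring, at which point
 * outstanding_lazy_request is cleared again.
 */
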
int
i915_add_request(struct intel_ring_buffer *ring,
		 struct drm_file *file,
		 struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	uint32_t seqno;
	u32 request_ring_position;
	int was_empty;
	int ret;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	if (request == NULL) {
		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;
	}

	seqno = i915_gem_next_request_seqno(ring);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring, &seqno);
	if (ret) {
		kfree(request);
		return ret;
	}

	trace_i915_gem_request_add(ring, seqno);

	request->seqno = seqno;
	request->ring = ring;
	request->tail = request_ring_position;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);
	request->file_priv = NULL;

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	ring->outstanding_lazy_request = 0;

	if (!dev_priv->mm.suspended) {
		if (i915_enable_hangcheck) {
			mod_timer(&dev_priv->hangcheck_timer,
				  jiffies +
				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
		}
		if (was_empty) {
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
			intel_mark_busy(dev_priv->dev);
		}
	}

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

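/*
 * Note (added for clarity): the reset helpers below run after a GPU hang.
 * They drop every pending request on each ring and force any objects still
 * on the active lists back to inactive, since a hung GPU will never retire
 * them on its own.
 */
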
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		i915_gem_object_move_to_inactive(obj);
	}
}


static void i915_gem_reset_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		i915_gem_write_fence(dev, i, NULL);

		if (reg->obj)
			i915_gem_object_fence_lost(reg->obj);

		reg->pin_count = 0;
		reg->obj = NULL;
		INIT_LIST_HEAD(&reg->lru_list);
	}

	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_lists(dev_priv, ring);

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
	list_for_each_entry(obj,
			    &dev_priv->mm.inactive_list,
			    mm_list)
	{
		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	}

	/* The fence registers are invalidated so clear them out */
	i915_gem_reset_fences(dev);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
	uint32_t seqno;
	int i;

	if (list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(ring->dev));

	seqno = ring->get_seqno(ring, true);

	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
		if (seqno >= ring->sync_seqno[i])
			ring->sync_seqno[i] = 0;

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(ring, request->seqno);
		/* We know the GPU must have read the request to have
		 * sent us the seqno + interrupt, so use the position
		 * of the tail of the request to update the last known
		 * position of the GPU head.
		 */
		ring->last_retired_head = request->tail;

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
			break;

		i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely(ring->trace_irq_seqno &&
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
		ring->irq_put(ring);
		ring->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(ring->dev));
}

void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_retire_requests_ring(ring);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	struct intel_ring_buffer *ring;
	bool idle;
	int i;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
		return;
	}

	i915_gem_retire_requests(dev);

	/* Send a periodic flush down the ring so we don't hold onto GEM
	 * objects indefinitely.
	 */
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		if (ring->gpu_caches_dirty)
			i915_add_request(ring, NULL, NULL);

		idle &= list_empty(&ring->request_list);
	}

	if (!dev_priv->mm.suspended && !idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	if (idle)
		intel_mark_idle(dev);

	mutex_unlock(&dev->struct_mutex);
}

int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
		     bool interruptible)
{
	if (atomic_read(&dev_priv->mm.wedged)) {
		struct completion *x = &dev_priv->error_completion;
		bool recovery_complete;
		unsigned long flags;

		/* Give the error handler a chance to run. */
		spin_lock_irqsave(&x->wait.lock, flags);
		recovery_complete = x->done > 0;
		spin_unlock_irqrestore(&x->wait.lock, flags);

		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but still wedged means reset failure. */
		if (recovery_complete)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_request)
		ret = i915_add_request(ring, NULL, NULL);

	return ret;
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with the remaining time filled into the timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time = {1, 0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
	 atomic_read(&dev_priv->mm.wedged))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

		ret = i915_gem_check_wedge(dev_priv, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		if (timeout)
			set_normalized_timespec(timeout, 0, 0);
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}
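
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use the in/out timeout convention of __wait_seqno(). The helper and its
 * one-second budget are hypothetical.
 */
static int example_wait_with_budget(struct intel_ring_buffer *ring, u32 seqno)
{
	struct timespec budget = ns_to_timespec(NSEC_PER_SEC);
	int ret;

	/* On return, budget holds however much of the second is left. */
	ret = __wait_seqno(ring, seqno, true, &budget);
	if (ret == -ETIME)
		DRM_DEBUG("seqno %u did not complete in time\n", seqno);
	return ret;
}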

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);

	return ret;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	u32 seqno;
	int ret;

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (readonly)
		seqno = obj->last_write_seqno;
	else
		seqno = obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(obj->ring, seqno);
	if (ret)
		return ret;

	/* Manually manage the write flush as we may not yet have retired
	 * the buffer.
	 */
	if (obj->last_write_seqno &&
	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
		obj->last_write_seqno = 0;
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	i915_gem_retire_requests_ring(obj->ring);
	return 0;
}

/**
 * Ensures that an object will eventually get non-busy by flushing any
 * required write domains, emitting any outstanding lazy request and
 * retiring any completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->active) {
		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
		if (ret)
			return ret;

		i915_gem_retire_requests_ring(obj->ring);
	}

	return 0;
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the
 * busy ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_ring_buffer *ring = NULL;
	struct timespec timeout_stack, *timeout = NULL;
	u32 seqno = 0;
	int ret = 0;

	if (args->timeout_ns >= 0) {
		timeout_stack = ns_to_timespec(args->timeout_ns);
		timeout = &timeout_stack;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
	if (&obj->base == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	/* Need to make sure the object gets inactive eventually. */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto out;

	if (obj->active) {
		seqno = obj->last_read_seqno;
		ring = obj->ring;
	}

	if (seqno == 0)
		goto out;

	/* Do this after the OLR check to make sure we make forward progress
	 * polling on this IOCTL with a 0 timeout (like the busy ioctl).
	 */
	if (!args->timeout_ns) {
		ret = -ETIME;
		goto out;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	ret = __wait_seqno(ring, seqno, true, timeout);
	if (timeout) {
		WARN_ON(!timespec_valid(timeout));
		args->timeout_ns = timespec_to_ns(timeout);
	}
	return ret;

out:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
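
/*
 * Illustrative userspace sketch, not part of this file: polling an object
 * for busyness with a zero timeout, which the comment above notes behaves
 * like the busy ioctl. drm_fd and handle are assumed to come from the
 * caller.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static int example_bo_busy(int drm_fd, unsigned int handle)
 *	{
 *		struct drm_i915_gem_wait wait;
 *
 *		memset(&wait, 0, sizeof(wait));
 *		wait.bo_handle = handle;
 *		wait.timeout_ns = 0;	// poll: -ETIME means still busy
 *
 *		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	}
 */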

/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct intel_ring_buffer *to)
{
	struct intel_ring_buffer *from = obj->ring;
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
		return i915_gem_object_wait_rendering(obj, false);

	idx = intel_ring_sync_index(from, to);

	seqno = obj->last_read_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;

	ret = i915_gem_check_olr(obj->ring, seqno);
	if (ret)
		return ret;

	ret = to->sync_to(to, from, seqno);
	if (!ret)
		from->sync_seqno[idx] = seqno;

	return ret;
}
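
/*
 * Illustrative sketch, not part of the original file: before sampling a
 * buffer on the render ring that was last written by another ring, callers
 * sync the object to the destination ring. With semaphores enabled this
 * queues a GPU-side wait instead of blocking the CPU. The helper name is
 * hypothetical.
 */
static int example_use_on_render_ring(struct drm_i915_gem_object *obj,
				      struct intel_ring_buffer *render_ring)
{
	/* No-op if obj is idle or already owned by render_ring. */
	return i915_gem_object_sync(obj, render_ring);
}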

static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Act as a barrier for all accesses through the GTT */
	mb();

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	int ret = 0;

	if (obj->gtt_space == NULL)
		return 0;

	if (obj->pin_count)
		return -EBUSY;

	ret = i915_gem_object_finish_gpu(obj);
	if (ret)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to clean up or else we might
	 * cause memory corruption through use-after-free.
	 */

	i915_gem_object_finish_gtt(obj);

	/* release the fence reg _after_ flushing */
	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	trace_i915_gem_object_unbind(obj);

	if (obj->has_global_gtt_mapping)
		i915_gem_gtt_unbind_object(obj);
	if (obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
		obj->has_aliasing_ppgtt_mapping = 0;
	}
	i915_gem_gtt_finish_object(obj);

	list_del(&obj->mm_list);
	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	obj->map_and_fenceable = true;

	drm_mm_put_block(obj->gtt_space);
	obj->gtt_space = NULL;
	obj->gtt_offset = 0;

	return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
	if (list_empty(&ring->active_list))
		return 0;

	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}

int i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	/* Flush everything onto the inactive list. */
	for_each_ring(ring, dev_priv, i) {
		ret = i915_ring_idle(ring);
		if (ret)
			return ret;

		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
		if (ret)
			return ret;
	}

	return 0;
}

static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
					struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint64_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;

		val = (uint64_t)((obj->gtt_offset + size - 4096) &
				 0xfffff000) << 32;
		val |= obj->gtt_offset & 0xfffff000;
		val |= (uint64_t)((obj->stride / 128) - 1) <<
			SANDYBRIDGE_FENCE_PITCH_SHIFT;

		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
	POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}

static void i965_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint64_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;

		val = (uint64_t)((obj->gtt_offset + size - 4096) &
				 0xfffff000) << 32;
		val |= obj->gtt_offset & 0xfffff000;
		val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
	POSTING_READ(FENCE_REG_965_0 + reg * 8);
}

static void i915_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 val;

	if (obj) {
		u32 size = obj->gtt_space->size;
		int pitch_val;
		int tile_width;

		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (obj->gtt_offset & (size - 1)),
		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		     obj->gtt_offset, obj->map_and_fenceable, size);

		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
			tile_width = 128;
		else
			tile_width = 512;

		/* Note: pitch better be a power of two tile widths */
		pitch_val = obj->stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

		val = obj->gtt_offset;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I915_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	if (reg < 8)
		reg = FENCE_REG_830_0 + reg * 4;
	else
		reg = FENCE_REG_945_8 + (reg - 8) * 4;

	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

static void i830_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;
		uint32_t pitch_val;

		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (obj->gtt_offset & (size - 1)),
		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
		     obj->gtt_offset, size);

		pitch_val = obj->stride / 128;
		pitch_val = ffs(pitch_val) - 1;

		val = obj->gtt_offset;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I830_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
	POSTING_READ(FENCE_REG_830_0 + reg * 4);
}

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
	case 5:
	case 4: i965_write_fence_reg(dev, reg, obj); break;
	case 3: i915_write_fence_reg(dev, reg, obj); break;
	case 2: i830_write_fence_reg(dev, reg, obj); break;
	default: break;
	}
}
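
/*
 * Worked example, not in the original: the gen2/3 pitch encoding above
 * stores log2 of the stride in tile widths. For a 2048-byte stride of
 * X tiles on gen3 (512-byte tile width): 2048 / 512 = 4, and
 * ffs(4) - 1 = 2, so a pitch_val of 2 lands in the fence register's
 * I830_FENCE_PITCH_SHIFT field.
 */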

static inline int fence_number(struct drm_i915_private *dev_priv,
			       struct drm_i915_fence_reg *fence)
{
	return fence - dev_priv->fence_regs;
}

static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int reg = fence_number(dev_priv, fence);

	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);

	if (enable) {
		obj->fence_reg = reg;
		fence->obj = obj;
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
	} else {
		obj->fence_reg = I915_FENCE_REG_NONE;
		fence->obj = NULL;
		list_del_init(&fence->lru_list);
	}
}

static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
	if (obj->last_fenced_seqno) {
		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
		if (ret)
			return ret;

		obj->last_fenced_seqno = 0;
	}

	/* Ensure that all CPU reads are completed before installing a fence
	 * and all writes before removing the fence.
	 */
	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
		mb();

	obj->fenced_gpu_access = false;
	return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	ret = i915_gem_object_flush_fence(obj);
	if (ret)
		return ret;

	if (obj->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	i915_gem_object_update_fence(obj,
				     &dev_priv->fence_regs[obj->fence_reg],
				     false);
	i915_gem_object_fence_lost(obj);

	return 0;
}

static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *avail;
	int i;

	/* First try to find a free reg */
	avail = NULL;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return reg;

		if (!reg->pin_count)
			avail = reg;
	}

	if (avail == NULL)
		return NULL;

	/* None available, try to steal one or wait for a user to finish */
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->pin_count)
			continue;

		return reg;
	}

	return NULL;
}

/**
 * i915_gem_object_get_fence - set up fencing for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool enable = obj->tiling_mode != I915_TILING_NONE;
	struct drm_i915_fence_reg *reg;
	int ret;

	/* Have we updated the tiling parameters upon the object and so
	 * will need to serialise the write to the associated fence register?
	 */
	if (obj->fence_dirty) {
		ret = i915_gem_object_flush_fence(obj);
		if (ret)
			return ret;
	}

	/* Just update our place in the LRU if our fence is getting reused. */
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
		if (!obj->fence_dirty) {
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
			return 0;
		}
	} else if (enable) {
		reg = i915_find_fence_reg(dev);
		if (reg == NULL)
			return -EDEADLK;

		if (reg->obj) {
			struct drm_i915_gem_object *old = reg->obj;

			ret = i915_gem_object_flush_fence(old);
			if (ret)
				return ret;

			i915_gem_object_fence_lost(old);
		}
	} else
		return 0;

	i915_gem_object_update_fence(obj, reg, enable);
	obj->fence_dirty = false;

	return 0;
}
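
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pins the object into the mappable aperture and then asks for a fence,
 * so that CPU access through the GTT sees correctly detiled data. The
 * helper name is hypothetical.
 */
static int example_fence_for_gtt_access(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_pin(obj, 0, true);	/* map_and_fenceable */
	if (ret)
		return ret;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		i915_gem_object_unpin(obj);
	return ret;
}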

static bool i915_gem_valid_gtt_space(struct drm_device *dev,
				     struct drm_mm_node *gtt_space,
				     unsigned long cache_level)
{
	struct drm_mm_node *other;

	/* On non-LLC machines we have to be careful when putting differing
	 * types of snoopable memory together to avoid the prefetcher
	 * crossing memory domains and dying.
	 */
	if (HAS_LLC(dev))
		return true;

	if (gtt_space == NULL)
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

static void i915_gem_verify_gtt(struct drm_device *dev)
{
#if WATCH_GTT
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->gtt_space == NULL) {
			printk(KERN_ERR "object found on GTT list with no space reserved\n");
			err++;
			continue;
		}

		if (obj->cache_level != obj->gtt_space->color) {
			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
			       obj->gtt_space->start,
			       obj->gtt_space->start + obj->gtt_space->size,
			       obj->cache_level,
			       obj->gtt_space->color);
			err++;
			continue;
		}

		if (!i915_gem_valid_gtt_space(dev,
					      obj->gtt_space,
					      obj->cache_level)) {
			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
			       obj->gtt_space->start,
			       obj->gtt_space->start + obj->gtt_space->size,
			       obj->cache_level);
			err++;
			continue;
		}
	}

	WARN_ON(err);
#endif
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			    unsigned alignment,
			    bool map_and_fenceable)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *free_space;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	int ret;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	fence_size = i915_gem_get_gtt_size(dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(dev,
						     obj->base.size,
						     obj->tiling_mode);
	unfenced_alignment =
		i915_gem_get_unfenced_gtt_alignment(dev,
						    obj->base.size,
						    obj->tiling_mode);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size >
	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages_gtt(obj);
	if (ret)
		return ret;

search_free:
	if (map_and_fenceable)
		free_space =
			drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
							  size, alignment, obj->cache_level,
							  0, dev_priv->mm.gtt_mappable_end,
							  false);
	else
		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
						      size, alignment, obj->cache_level,
						      false);

	if (free_space != NULL) {
		if (map_and_fenceable)
			obj->gtt_space =
				drm_mm_get_block_range_generic(free_space,
							       size, alignment, obj->cache_level,
							       0, dev_priv->mm.gtt_mappable_end,
							       false);
		else
			obj->gtt_space =
				drm_mm_get_block_generic(free_space,
							 size, alignment, obj->cache_level,
							 false);
	}
	if (obj->gtt_space == NULL) {
		ret = i915_gem_evict_something(dev, size, alignment,
					       obj->cache_level,
					       map_and_fenceable);
		if (ret)
			return ret;

		goto search_free;
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
					      obj->gtt_space,
					      obj->cache_level))) {
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;
		return -EINVAL;
	}

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;
		return ret;
	}

	if (!dev_priv->mm.aliasing_ppgtt)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->gtt_offset = obj->gtt_space->start;

	fenceable =
		obj->gtt_space->size == fence_size &&
		(obj->gtt_space->start & (fence_alignment - 1)) == 0;

	mappable =
		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;

	obj->map_and_fenceable = mappable && fenceable;

	trace_i915_gem_object_bind(obj, map_and_fenceable);
	i915_gem_verify_gtt(dev);
	return 0;
}

void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (obj->cache_level != I915_CACHE_NONE)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	intel_gtt_chipset_flush();
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	return 0;
}
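
/*
 * Illustrative sketch, not part of the original file: before writing
 * through a GTT mapping, callers move the object into the GTT write
 * domain so the tracker knows the pages are dirty and which caches to
 * flush on the next transition. The helper name is hypothetical.
 */
static int example_prepare_gtt_write(struct drm_i915_gem_object *obj)
{
	/* write=true also marks the object dirty for swap-out purposes. */
	return i915_gem_object_set_to_gtt_domain(obj, true);
}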

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (obj->cache_level == cache_level)
		return 0;

	if (obj->pin_count) {
		DRM_DEBUG("can not change the cache level of pinned objects\n");
		return -EBUSY;
	}

	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			return ret;
	}

	if (obj->gtt_space) {
		ret = i915_gem_object_finish_gpu(obj);
		if (ret)
			return ret;

		i915_gem_object_finish_gtt(obj);

		/* Before SandyBridge, you could not use tiling or fence
		 * registers with snooped memory, so relinquish any fences
		 * currently pointing to our region in the aperture.
		 */
		if (INTEL_INFO(dev)->gen < 6) {
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		}

		if (obj->has_global_gtt_mapping)
			i915_gem_gtt_bind_object(obj, cache_level);
		if (obj->has_aliasing_ppgtt_mapping)
			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
					       obj, cache_level);

		obj->gtt_space->color = cache_level;
	}

	if (cache_level == I915_CACHE_NONE) {
		u32 old_read_domains, old_write_domain;

		/* If we're coming from LLC cached, then we haven't
		 * actually been tracking whether the data is in the
		 * CPU cache or not, since we only allow one bit set
		 * in obj->write_domain and have been skipping the clflushes.
		 * Just set it to the CPU cache for now.
		 */
		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);

		old_read_domains = obj->base.read_domains;
		old_write_domain = obj->base.write_domain;

		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;

		trace_i915_gem_object_change_domain(obj,
						    old_read_domains,
						    old_write_domain);
	}

	obj->cache_level = cache_level;
	i915_gem_verify_gtt(dev);
	return 0;
}

int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_gem_cacheing *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	args->cacheing = obj->cache_level != I915_CACHE_NONE;

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_gem_cacheing *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->cacheing) {
	case I915_CACHEING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHEING_CACHED:
		level = I915_CACHE_LLC;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
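
/*
 * Illustrative userspace sketch, not part of this file: requesting LLC
 * caching for a buffer through the (historically misspelled) cacheing
 * ioctl. The DRM_IOCTL_I915_GEM_SET_CACHEING request name is assumed to
 * match the uapi of this era.
 *
 *	struct drm_i915_gem_cacheing arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.handle = handle;
 *	arg.cacheing = I915_CACHEING_CACHED;
 *	ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg);
 */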

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	if (pipelined != obj->ring) {
		ret = i915_gem_object_sync(obj, pipelined);
		if (ret)
			return ret;
	}

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is the lowest common denominator for
	 * all chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret)
		return ret;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_pin(obj, alignment, true);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
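
/*
 * Illustrative sketch, not part of the original file: modesetting code
 * typically pins a framebuffer for scanout along these lines, with the
 * alignment dictated by the display engine. The helper name is
 * hypothetical.
 */
static int example_pin_scanout(struct drm_i915_gem_object *obj, u32 alignment)
{
	/* pipelined == NULL: serialise with outstanding rendering first. */
	return i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
}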

int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
	int ret;

	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	/* Ensure that we invalidate the GPU's caches and TLBs. */
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
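
/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * for direct CPU access to a shmem-backed object is to move it into the
 * CPU domain first; write=true ensures the GPU read domains will be
 * invalidated on the object's next GPU use. The helper name is
 * hypothetical.
 */
static int example_begin_cpu_access(struct drm_i915_gem_object *obj, bool write)
{
	return i915_gem_object_set_to_cpu_domain(obj, write);
}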

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, true, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}

int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    uint32_t alignment,
		    bool map_and_fenceable)
{
	int ret;

	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);

	if (obj->gtt_space != NULL) {
		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     obj->gtt_offset, alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment,
						  map_and_fenceable);
		if (ret)
			return ret;
	}

	if (!obj->has_global_gtt_mapping && map_and_fenceable)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	obj->pin_count++;
	obj->pin_mappable |= map_and_fenceable;

	return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pin_count == 0);
	BUG_ON(obj->gtt_space == NULL);

	if (--obj->pin_count == 0)
		obj->pin_mappable = false;
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;
	if (obj->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment, true);
		if (ret)
			goto out;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj->gtt_offset;
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);

	args->busy = obj->active;
	if (obj->ring) {
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
		args->busy |= intel_ring_flag(obj->ring) << 16;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
3ef94daa
CW
3471int
3472i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3473 struct drm_file *file_priv)
3474{
3475 struct drm_i915_gem_madvise *args = data;
05394f39 3476 struct drm_i915_gem_object *obj;
76c1dec1 3477 int ret;
3ef94daa
CW
3478
3479 switch (args->madv) {
3480 case I915_MADV_DONTNEED:
3481 case I915_MADV_WILLNEED:
3482 break;
3483 default:
3484 return -EINVAL;
3485 }
3486
1d7cfea1
CW
3487 ret = i915_mutex_lock_interruptible(dev);
3488 if (ret)
3489 return ret;
3490
05394f39 3491 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
c8725226 3492 if (&obj->base == NULL) {
1d7cfea1
CW
3493 ret = -ENOENT;
3494 goto unlock;
3ef94daa 3495 }
3ef94daa 3496
05394f39 3497 if (obj->pin_count) {
1d7cfea1
CW
3498 ret = -EINVAL;
3499 goto out;
3ef94daa
CW
3500 }
3501
05394f39
CW
3502 if (obj->madv != __I915_MADV_PURGED)
3503 obj->madv = args->madv;
3ef94daa 3504
6c085a72
CW
3505 /* if the object's backing pages are no longer attached, discard its storage immediately */
3506 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
2d7ef395
CW
3507 i915_gem_object_truncate(obj);
3508
05394f39 3509 args->retained = obj->madv != __I915_MADV_PURGED;
bb6baf76 3510
1d7cfea1 3511out:
05394f39 3512 drm_gem_object_unreference(&obj->base);
1d7cfea1 3513unlock:
3ef94daa 3514 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3515 return ret;
3ef94daa
CW
3516}
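/*
 * Madvise bookkeeping: DONTNEED marks the object purgeable so the
 * shrinker may drop its backing pages under memory pressure, while
 * WILLNEED clears that hint.  Once the pages have actually been
 * discarded the object is latched at __I915_MADV_PURGED, which is why
 * the code above refuses to overwrite that state and reports
 * args->retained = 0.  Pinned objects are rejected outright since
 * their storage must stay resident.
 */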
3517
05394f39
CW
3518struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3519 size_t size)
ac52bc56 3520{
73aa808f 3521 struct drm_i915_private *dev_priv = dev->dev_private;
c397b908 3522 struct drm_i915_gem_object *obj;
5949eac4 3523 struct address_space *mapping;
bed1ea95 3524 u32 mask;
ac52bc56 3525
c397b908
DV
3526 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3527 if (obj == NULL)
3528 return NULL;
673a394b 3529
c397b908
DV
3530 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3531 kfree(obj);
3532 return NULL;
3533 }
673a394b 3534
bed1ea95
CW
3535 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3536 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3537 /* 965gm cannot relocate objects above 4GiB. */
3538 mask &= ~__GFP_HIGHMEM;
3539 mask |= __GFP_DMA32;
3540 }
3541
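	/* Stash the mask on the shmemfs mapping itself so that every
	 * backing page later allocated for this object (via
	 * shmem_read_mapping_page() and friends) inherits the placement
	 * constraints chosen above.
	 */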
5949eac4 3542 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
bed1ea95 3543 mapping_set_gfp_mask(mapping, mask);
5949eac4 3544
73aa808f
CW
3545 i915_gem_info_add_obj(dev_priv, size);
3546
c397b908
DV
3547 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3548 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 3549
3d29b842
ED
3550 if (HAS_LLC(dev)) {
3551 /* On some devices, we can have the GPU use the LLC (the CPU
a1871112
EA
3552 * cache) for about a 10% performance improvement
3553 * compared to uncached. Graphics requests other than
3554 * display scanout are coherent with the CPU in
3555 * accessing this cache. This means in this mode we
3556 * don't need to clflush on the CPU side, and on the
3557 * GPU side we only need to flush internal caches to
3558 * get data visible to the CPU.
3559 *
3560 * However, we maintain the display planes as UC, and so
3561 * need to rebind when first used as such.
3562 */
3563 obj->cache_level = I915_CACHE_LLC;
3564 } else
3565 obj->cache_level = I915_CACHE_NONE;
3566
62b8b215 3567 obj->base.driver_private = NULL;
c397b908 3568 obj->fence_reg = I915_FENCE_REG_NONE;
69dc4987 3569 INIT_LIST_HEAD(&obj->mm_list);
93a37f20 3570 INIT_LIST_HEAD(&obj->gtt_list);
69dc4987 3571 INIT_LIST_HEAD(&obj->ring_list);
432e58ed 3572 INIT_LIST_HEAD(&obj->exec_list);
c397b908 3573 obj->madv = I915_MADV_WILLNEED;
75e9e915
DV
3574 /* Avoid an unnecessary call to unbind on the first bind. */
3575 obj->map_and_fenceable = true;
de151cf6 3576
05394f39 3577 return obj;
c397b908
DV
3578}
3579
3580int i915_gem_init_object(struct drm_gem_object *obj)
3581{
3582 BUG();
de151cf6 3583
673a394b
EA
3584 return 0;
3585}
3586
1488fc08 3587void i915_gem_free_object(struct drm_gem_object *gem_obj)
673a394b 3588{
1488fc08 3589 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
05394f39 3590 struct drm_device *dev = obj->base.dev;
be72615b 3591 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 3592
26e12f89
CW
3593 trace_i915_gem_object_destroy(obj);
3594
1286ff73
DV
3595 if (gem_obj->import_attach)
3596 drm_prime_gem_destroy(gem_obj, obj->sg_table);
3597
1488fc08
CW
3598 if (obj->phys_obj)
3599 i915_gem_detach_phys_object(dev, obj);
3600
3601 obj->pin_count = 0;
3602 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3603 bool was_interruptible;
3604
3605 was_interruptible = dev_priv->mm.interruptible;
3606 dev_priv->mm.interruptible = false;
3607
3608 WARN_ON(i915_gem_object_unbind(obj));
3609
3610 dev_priv->mm.interruptible = was_interruptible;
3611 }
3612
6c085a72 3613 i915_gem_object_put_pages_gtt(obj);
05394f39 3614 if (obj->base.map_list.map)
b464e9a2 3615 drm_gem_free_mmap_offset(&obj->base);
de151cf6 3616
05394f39
CW
3617 drm_gem_object_release(&obj->base);
3618 i915_gem_info_remove_obj(dev_priv, obj->base.size);
c397b908 3619
05394f39
CW
3620 kfree(obj->bit_17);
3621 kfree(obj);
673a394b
EA
3622}
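/*
 * Teardown order above matters: detach any phys backing first, force
 * pin_count to zero so the unbind cannot be blocked by stale pins,
 * retry the unbind non-interruptibly if a signal aborted it (object
 * destruction cannot fail), and only then release the backing pages,
 * the mmap offset and the bookkeeping.
 */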
3623
29105ccc
CW
3624int
3625i915_gem_idle(struct drm_device *dev)
3626{
3627 drm_i915_private_t *dev_priv = dev->dev_private;
3628 int ret;
28dfe52a 3629
29105ccc 3630 mutex_lock(&dev->struct_mutex);
1c5d22f7 3631
87acb0a5 3632 if (dev_priv->mm.suspended) {
29105ccc
CW
3633 mutex_unlock(&dev->struct_mutex);
3634 return 0;
28dfe52a
EA
3635 }
3636
b2da9fe5 3637 ret = i915_gpu_idle(dev);
6dbe2772
KP
3638 if (ret) {
3639 mutex_unlock(&dev->struct_mutex);
673a394b 3640 return ret;
6dbe2772 3641 }
b2da9fe5 3642 i915_gem_retire_requests(dev);
673a394b 3643
29105ccc 3644 /* Under UMS, be paranoid and evict. */
a39d7efc 3645 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6c085a72 3646 i915_gem_evict_everything(dev);
29105ccc 3647
312817a3
CW
3648 i915_gem_reset_fences(dev);
3649
29105ccc
CW
3650 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3651 * We need to replace this with a semaphore, or something.
3652 * And not confound mm.suspended!
3653 */
3654 dev_priv->mm.suspended = 1;
bc0c7f14 3655 del_timer_sync(&dev_priv->hangcheck_timer);
29105ccc
CW
3656
3657 i915_kernel_lost_context(dev);
6dbe2772 3658 i915_gem_cleanup_ringbuffer(dev);
29105ccc 3659
6dbe2772
KP
3660 mutex_unlock(&dev->struct_mutex);
3661
29105ccc
CW
3662 /* Cancel the retire work handler, which should be idle now. */
3663 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3664
673a394b
EA
3665 return 0;
3666}
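/*
 * Note the ordering in i915_gem_idle(): the GPU is quiesced and
 * requests retired before the fences are reset, and mm.suspended is
 * set (blocking further execbuf) before the hangcheck timer and rings
 * are torn down.  The retire worker is cancelled last, outside the
 * lock, once mm.suspended guarantees it can no longer rearm itself.
 */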
3667
b9524a1e
BW
3668void i915_gem_l3_remap(struct drm_device *dev)
3669{
3670 drm_i915_private_t *dev_priv = dev->dev_private;
3671 u32 misccpctl;
3672 int i;
3673
3674 if (!IS_IVYBRIDGE(dev))
3675 return;
3676
3677 if (!dev_priv->mm.l3_remap_info)
3678 return;
3679
3680 misccpctl = I915_READ(GEN7_MISCCPCTL);
3681 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3682 POSTING_READ(GEN7_MISCCPCTL);
3683
3684 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3685 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3686 if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
3687 DRM_DEBUG("0x%x was already programmed to %x\n",
3688 GEN7_L3LOG_BASE + i, remap);
3689 if (remap && !dev_priv->mm.l3_remap_info[i/4])
3690 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3691 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
3692 }
3693
3694 /* Make sure all the writes land before re-enabling DOP clock gating */
3695 POSTING_READ(GEN7_L3LOG_BASE);
3696
3697 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3698}
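/*
 * Summary of the sequence above: DOP clock gating is masked off in
 * MISCCPCTL while the saved remap table is replayed into the
 * GEN7_L3LOG registers, and the POSTING_READ ensures those writes
 * have landed before gating is restored.
 */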
3699
f691e2f4
DV
3700void i915_gem_init_swizzling(struct drm_device *dev)
3701{
3702 drm_i915_private_t *dev_priv = dev->dev_private;
3703
11782b02 3704 if (INTEL_INFO(dev)->gen < 5 ||
f691e2f4
DV
3705 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3706 return;
3707
3708 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3709 DISP_TILE_SURFACE_SWIZZLING);
3710
11782b02
DV
3711 if (IS_GEN5(dev))
3712 return;
3713
f691e2f4
DV
3714 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3715 if (IS_GEN6(dev))
6b26c86d 3716 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
f691e2f4 3717 else
6b26c86d 3718 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
f691e2f4 3719}
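/*
 * ARB_MODE and friends are "masked" registers: the top 16 bits select
 * which of the low 16 bits the write actually touches.
 * _MASKED_BIT_ENABLE(bit) therefore expands to roughly
 * ((bit) << 16 | (bit)), setting the bit without disturbing its
 * neighbours.
 */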
e21af88d
DV
3720
3721void i915_gem_init_ppgtt(struct drm_device *dev)
3722{
3723 drm_i915_private_t *dev_priv = dev->dev_private;
3724 uint32_t pd_offset;
3725 struct intel_ring_buffer *ring;
55a254ac
DV
3726 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3727 uint32_t __iomem *pd_addr;
3728 uint32_t pd_entry;
e21af88d
DV
3729 int i;
3730
3731 if (!dev_priv->mm.aliasing_ppgtt)
3732 return;
3733
55a254ac
DV
3734
3735 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3736 for (i = 0; i < ppgtt->num_pd_entries; i++) {
3737 dma_addr_t pt_addr;
3738
3739 if (dev_priv->mm.gtt->needs_dmar)
3740 pt_addr = ppgtt->pt_dma_addr[i];
3741 else
3742 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3743
3744 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3745 pd_entry |= GEN6_PDE_VALID;
3746
3747 writel(pd_entry, pd_addr + i);
3748 }
3749 readl(pd_addr);
3750
3751 pd_offset = ppgtt->pd_offset;
e21af88d
DV
3752 pd_offset /= 64; /* in cachelines, */
3753 pd_offset <<= 16;
3754
3755 if (INTEL_INFO(dev)->gen == 6) {
48ecfa10
DV
3756 uint32_t ecochk, gab_ctl, ecobits;
3757
3758 ecobits = I915_READ(GAC_ECO_BITS);
3759 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
be901a5a
DV
3760
3761 gab_ctl = I915_READ(GAB_CTL);
3762 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
3763
3764 ecochk = I915_READ(GAM_ECOCHK);
e21af88d
DV
3765 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3766 ECOCHK_PPGTT_CACHE64B);
6b26c86d 3767 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
e21af88d
DV
3768 } else if (INTEL_INFO(dev)->gen >= 7) {
3769 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3770 /* GFX_MODE is per-ring on gen7+ */
3771 }
3772
b4519513 3773 for_each_ring(ring, dev_priv, i) {
e21af88d
DV
3774 if (INTEL_INFO(dev)->gen >= 7)
3775 I915_WRITE(RING_MODE_GEN7(ring),
6b26c86d 3776 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
e21af88d
DV
3777
3778 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3779 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3780 }
3781}
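/*
 * Two details of the setup above are worth spelling out.  Each PDE is
 * a page-table bus address run through GEN6_PDE_ADDR_ENCODE() plus a
 * valid bit, written into the global GTT's PTE space at the
 * directory's offset (the directory steals GTT entries, cf.
 * i915_gem_init() below; the readl() is a posting read).  And
 * RING_PP_DIR_BASE wants the directory offset expressed in cachelines
 * in its high 16 bits, hence the divide by 64 and the shift by 16.
 */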
3782
67b1b571
CW
3783static bool
3784intel_enable_blt(struct drm_device *dev)
3785{
3786 if (!HAS_BLT(dev))
3787 return false;
3788
3789 /* The blitter was dysfunctional on early prototypes */
3790 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3791 DRM_INFO("BLT not supported on this pre-production hardware;"
3792 " graphics performance will be degraded.\n");
3793 return false;
3794 }
3795
3796 return true;
3797}
3798
8187a2b7 3799int
f691e2f4 3800i915_gem_init_hw(struct drm_device *dev)
8187a2b7
ZN
3801{
3802 drm_i915_private_t *dev_priv = dev->dev_private;
3803 int ret;
68f95ba9 3804
8ecd1a66
DV
3805 if (!intel_enable_gtt())
3806 return -EIO;
3807
b9524a1e
BW
3808 i915_gem_l3_remap(dev);
3809
f691e2f4
DV
3810 i915_gem_init_swizzling(dev);
3811
5c1143bb 3812 ret = intel_init_render_ring_buffer(dev);
68f95ba9 3813 if (ret)
b6913e4b 3814 return ret;
68f95ba9
CW
3815
3816 if (HAS_BSD(dev)) {
5c1143bb 3817 ret = intel_init_bsd_ring_buffer(dev);
68f95ba9
CW
3818 if (ret)
3819 goto cleanup_render_ring;
d1b851fc 3820 }
68f95ba9 3821
67b1b571 3822 if (intel_enable_blt(dev)) {
549f7365
CW
3823 ret = intel_init_blt_ring_buffer(dev);
3824 if (ret)
3825 goto cleanup_bsd_ring;
3826 }
3827
6f392d54
CW
3828 dev_priv->next_seqno = 1;
3829
254f965c
BW
3830 /*
3831 * XXX: There was some w/a described somewhere suggesting loading
3832 * contexts before PPGTT.
3833 */
3834 i915_gem_context_init(dev);
e21af88d
DV
3835 i915_gem_init_ppgtt(dev);
3836
68f95ba9
CW
3837 return 0;
3838
549f7365 3839cleanup_bsd_ring:
1ec14ad3 3840 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
68f95ba9 3841cleanup_render_ring:
1ec14ad3 3842 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
8187a2b7
ZN
3843 return ret;
3844}
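/*
 * On failure the rings initialized so far are unwound in reverse
 * order via the cleanup labels; the blitter needs no label of its own
 * since nothing after it in this function can fail.
 */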
3845
1070a42b
CW
3846static bool
3847intel_enable_ppgtt(struct drm_device *dev)
3848{
3849 if (i915_enable_ppgtt >= 0)
3850 return i915_enable_ppgtt;
3851
3852#ifdef CONFIG_INTEL_IOMMU
3853 /* Disable ppgtt on SNB if VT-d is on. */
3854 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
3855 return false;
3856#endif
3857
3858 return true;
3859}
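/*
 * i915_enable_ppgtt is the module parameter: a negative value
 * requests auto-detection, in which case the only quirk applied is
 * avoiding aliasing PPGTT on Sandybridge when VT-d is enabled.
 */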
3860
3861int i915_gem_init(struct drm_device *dev)
3862{
3863 struct drm_i915_private *dev_priv = dev->dev_private;
3864 unsigned long gtt_size, mappable_size;
3865 int ret;
3866
3867 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3868 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3869
3870 mutex_lock(&dev->struct_mutex);
3871 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3872 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3873 * aperture accordingly when using aliasing ppgtt. */
3874 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3875
3876 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
3877
3878 ret = i915_gem_init_aliasing_ppgtt(dev);
3879 if (ret) {
3880 mutex_unlock(&dev->struct_mutex);
3881 return ret;
3882 }
3883 } else {
3884 /* Let GEM manage all of the aperture.
3885 *
3886 * However, leave one page at the end still bound to the scratch
3887 * page. There are a number of places where the hardware
3888 * apparently prefetches past the end of the object, and we've
3889 * seen multiple hangs with the GPU head pointer stuck in a
3890 * batchbuffer bound at the last page of the aperture. One page
3891 * should be enough to keep any prefetching inside of the
3892 * aperture.
3893 */
3894 i915_gem_init_global_gtt(dev, 0, mappable_size,
3895 gtt_size);
3896 }
3897
3898 ret = i915_gem_init_hw(dev);
3899 mutex_unlock(&dev->struct_mutex);
3900 if (ret) {
3901 i915_gem_cleanup_aliasing_ppgtt(dev);
3902 return ret;
3903 }
3904
53ca26ca
DV
3905 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3906 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3907 dev_priv->dri1.allow_batchbuffer = 1;
1070a42b
CW
3908 return 0;
3909}
3910
8187a2b7
ZN
3911void
3912i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3913{
3914 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 3915 struct intel_ring_buffer *ring;
1ec14ad3 3916 int i;
8187a2b7 3917
b4519513
CW
3918 for_each_ring(ring, dev_priv, i)
3919 intel_cleanup_ring_buffer(ring);
8187a2b7
ZN
3920}
3921
673a394b
EA
3922int
3923i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3924 struct drm_file *file_priv)
3925{
3926 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 3927 int ret;
673a394b 3928
79e53945
JB
3929 if (drm_core_check_feature(dev, DRIVER_MODESET))
3930 return 0;
3931
ba1234d1 3932 if (atomic_read(&dev_priv->mm.wedged)) {
673a394b 3933 DRM_ERROR("Reenabling wedged hardware, good luck\n");
ba1234d1 3934 atomic_set(&dev_priv->mm.wedged, 0);
673a394b
EA
3935 }
3936
673a394b 3937 mutex_lock(&dev->struct_mutex);
9bb2d6f9
EA
3938 dev_priv->mm.suspended = 0;
3939
f691e2f4 3940 ret = i915_gem_init_hw(dev);
d816f6ac
WF
3941 if (ret != 0) {
3942 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 3943 return ret;
d816f6ac 3944 }
9bb2d6f9 3945
69dc4987 3946 BUG_ON(!list_empty(&dev_priv->mm.active_list));
673a394b 3947 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
673a394b 3948 mutex_unlock(&dev->struct_mutex);
dbb19d30 3949
5f35308b
CW
3950 ret = drm_irq_install(dev);
3951 if (ret)
3952 goto cleanup_ringbuffer;
dbb19d30 3953
673a394b 3954 return 0;
5f35308b
CW
3955
3956cleanup_ringbuffer:
3957 mutex_lock(&dev->struct_mutex);
3958 i915_gem_cleanup_ringbuffer(dev);
3959 dev_priv->mm.suspended = 1;
3960 mutex_unlock(&dev->struct_mutex);
3961
3962 return ret;
673a394b
EA
3963}
3964
3965int
3966i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3967 struct drm_file *file_priv)
3968{
79e53945
JB
3969 if (drm_core_check_feature(dev, DRIVER_MODESET))
3970 return 0;
3971
dbb19d30 3972 drm_irq_uninstall(dev);
e6890f6f 3973 return i915_gem_idle(dev);
673a394b
EA
3974}
3975
3976void
3977i915_gem_lastclose(struct drm_device *dev)
3978{
3979 int ret;
673a394b 3980
e806b495
EA
3981 if (drm_core_check_feature(dev, DRIVER_MODESET))
3982 return;
3983
6dbe2772
KP
3984 ret = i915_gem_idle(dev);
3985 if (ret)
3986 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
3987}
3988
64193406
CW
3989static void
3990init_ring_lists(struct intel_ring_buffer *ring)
3991{
3992 INIT_LIST_HEAD(&ring->active_list);
3993 INIT_LIST_HEAD(&ring->request_list);
64193406
CW
3994}
3995
673a394b
EA
3996void
3997i915_gem_load(struct drm_device *dev)
3998{
b5aa8a0f 3999 int i;
673a394b
EA
4000 drm_i915_private_t *dev_priv = dev->dev_private;
4001
69dc4987 4002 INIT_LIST_HEAD(&dev_priv->mm.active_list);
673a394b 4003 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
6c085a72
CW
4004 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4005 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
a09ba7fa 4006 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1ec14ad3
CW
4007 for (i = 0; i < I915_NUM_RINGS; i++)
4008 init_ring_lists(&dev_priv->ring[i]);
4b9de737 4009 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
007cc8ac 4010 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
673a394b
EA
4011 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4012 i915_gem_retire_work_handler);
30dbf0c0 4013 init_completion(&dev_priv->error_completion);
31169714 4014
94400120
DA
4015 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4016 if (IS_GEN3(dev)) {
50743298
DV
4017 I915_WRITE(MI_ARB_STATE,
4018 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
94400120
DA
4019 }
4020
72bfa19c
CW
4021 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4022
de151cf6 4023 /* Old X drivers will take 0-2 for front, back, depth buffers */
b397c836
EA
4024 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4025 dev_priv->fence_reg_start = 3;
de151cf6 4026
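	/* Gen4+ parts and the 945/G33 class have 16 fence registers;
	 * everything older makes do with 8.
	 */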
a6c45cf0 4027 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
4028 dev_priv->num_fence_regs = 16;
4029 else
4030 dev_priv->num_fence_regs = 8;
4031
b5aa8a0f 4032 /* Initialize fence registers to zero */
ada726c7 4033 i915_gem_reset_fences(dev);
10ed13e4 4034
673a394b 4035 i915_gem_detect_bit_6_swizzle(dev);
6b95a207 4036 init_waitqueue_head(&dev_priv->pending_flip_queue);
17250b71 4037
ce453d81
CW
4038 dev_priv->mm.interruptible = true;
4039
17250b71
CW
4040 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4041 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4042 register_shrinker(&dev_priv->mm.inactive_shrinker);
673a394b 4043}
71acb5eb
DA
4044
4045/*
4046 * Create a physically contiguous memory object to back a GEM object,
4047 * e.g. for cursor and overlay registers
4048 */
995b6762
CW
4049static int i915_gem_init_phys_object(struct drm_device *dev,
4050 int id, int size, int align)
71acb5eb
DA
4051{
4052 drm_i915_private_t *dev_priv = dev->dev_private;
4053 struct drm_i915_gem_phys_object *phys_obj;
4054 int ret;
4055
4056 if (dev_priv->mm.phys_objs[id - 1] || !size)
4057 return 0;
4058
9a298b2a 4059 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
71acb5eb
DA
4060 if (!phys_obj)
4061 return -ENOMEM;
4062
4063 phys_obj->id = id;
4064
6eeefaf3 4065 phys_obj->handle = drm_pci_alloc(dev, size, align);
71acb5eb
DA
4066 if (!phys_obj->handle) {
4067 ret = -ENOMEM;
4068 goto kfree_obj;
4069 }
4070#ifdef CONFIG_X86
4071 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4072#endif
4073
4074 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4075
4076 return 0;
4077kfree_obj:
9a298b2a 4078 kfree(phys_obj);
71acb5eb
DA
4079 return ret;
4080}
4081
995b6762 4082static void i915_gem_free_phys_object(struct drm_device *dev, int id)
71acb5eb
DA
4083{
4084 drm_i915_private_t *dev_priv = dev->dev_private;
4085 struct drm_i915_gem_phys_object *phys_obj;
4086
4087 if (!dev_priv->mm.phys_objs[id - 1])
4088 return;
4089
4090 phys_obj = dev_priv->mm.phys_objs[id - 1];
4091 if (phys_obj->cur_obj) {
4092 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4093 }
4094
4095#ifdef CONFIG_X86
4096 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4097#endif
4098 drm_pci_free(dev, phys_obj->handle);
4099 kfree(phys_obj);
4100 dev_priv->mm.phys_objs[id - 1] = NULL;
4101}
4102
4103void i915_gem_free_all_phys_object(struct drm_device *dev)
4104{
4105 int i;
4106
260883c8 4107 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4108 i915_gem_free_phys_object(dev, i);
4109}
4110
4111void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 4112 struct drm_i915_gem_object *obj)
71acb5eb 4113{
05394f39 4114 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
e5281ccd 4115 char *vaddr;
71acb5eb 4116 int i;
71acb5eb
DA
4117 int page_count;
4118
05394f39 4119 if (!obj->phys_obj)
71acb5eb 4120 return;
05394f39 4121 vaddr = obj->phys_obj->handle->vaddr;
71acb5eb 4122
05394f39 4123 page_count = obj->base.size / PAGE_SIZE;
71acb5eb 4124 for (i = 0; i < page_count; i++) {
5949eac4 4125 struct page *page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4126 if (!IS_ERR(page)) {
4127 char *dst = kmap_atomic(page);
4128 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4129 kunmap_atomic(dst);
4130
4131 drm_clflush_pages(&page, 1);
4132
4133 set_page_dirty(page);
4134 mark_page_accessed(page);
4135 page_cache_release(page);
4136 }
71acb5eb 4137 }
40ce6575 4138 intel_gtt_chipset_flush();
d78b47b9 4139
05394f39
CW
4140 obj->phys_obj->cur_obj = NULL;
4141 obj->phys_obj = NULL;
71acb5eb
DA
4142}
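/*
 * The shmem pages remain the canonical storage for a phys-backed
 * object: detaching, as above, copies the contents back out of the
 * contiguous buffer page by page, clflushes them (the copy bypassed
 * the GPU's view), and marks them dirty so they are written back.
 * Attach, below, performs the mirror-image copy into the buffer.
 */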
4143
4144int
4145i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 4146 struct drm_i915_gem_object *obj,
6eeefaf3
CW
4147 int id,
4148 int align)
71acb5eb 4149{
05394f39 4150 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
71acb5eb 4151 drm_i915_private_t *dev_priv = dev->dev_private;
71acb5eb
DA
4152 int ret = 0;
4153 int page_count;
4154 int i;
4155
4156 if (id > I915_MAX_PHYS_OBJECT)
4157 return -EINVAL;
4158
05394f39
CW
4159 if (obj->phys_obj) {
4160 if (obj->phys_obj->id == id)
71acb5eb
DA
4161 return 0;
4162 i915_gem_detach_phys_object(dev, obj);
4163 }
4164
71acb5eb
DA
4165 /* create a new object */
4166 if (!dev_priv->mm.phys_objs[id - 1]) {
4167 ret = i915_gem_init_phys_object(dev, id,
05394f39 4168 obj->base.size, align);
71acb5eb 4169 if (ret) {
05394f39
CW
4170 DRM_ERROR("failed to init phys object %d size: %zu\n",
4171 id, obj->base.size);
e5281ccd 4172 return ret;
71acb5eb
DA
4173 }
4174 }
4175
4176 /* bind to the object */
05394f39
CW
4177 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4178 obj->phys_obj->cur_obj = obj;
71acb5eb 4179
05394f39 4180 page_count = obj->base.size / PAGE_SIZE;
71acb5eb
DA
4181
4182 for (i = 0; i < page_count; i++) {
e5281ccd
CW
4183 struct page *page;
4184 char *dst, *src;
4185
5949eac4 4186 page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4187 if (IS_ERR(page))
4188 return PTR_ERR(page);
71acb5eb 4189
ff75b9bc 4190 src = kmap_atomic(page);
05394f39 4191 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
71acb5eb 4192 memcpy(dst, src, PAGE_SIZE);
3e4d3af5 4193 kunmap_atomic(src);
71acb5eb 4194
e5281ccd
CW
4195 mark_page_accessed(page);
4196 page_cache_release(page);
4197 }
d78b47b9 4198
71acb5eb 4199 return 0;
71acb5eb
DA
4200}
4201
4202static int
05394f39
CW
4203i915_gem_phys_pwrite(struct drm_device *dev,
4204 struct drm_i915_gem_object *obj,
71acb5eb
DA
4205 struct drm_i915_gem_pwrite *args,
4206 struct drm_file *file_priv)
4207{
05394f39 4208 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
b47b30cc 4209 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
71acb5eb 4210
b47b30cc
CW
4211 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4212 unsigned long unwritten;
4213
4214 /* The physical object once assigned is fixed for the lifetime
4215 * of the obj, so we can safely drop the lock and continue
4216 * to access vaddr.
4217 */
4218 mutex_unlock(&dev->struct_mutex);
4219 unwritten = copy_from_user(vaddr, user_data, args->size);
4220 mutex_lock(&dev->struct_mutex);
4221 if (unwritten)
4222 return -EFAULT;
4223 }
71acb5eb 4224
40ce6575 4225 intel_gtt_chipset_flush();
71acb5eb
DA
4226 return 0;
4227}
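/*
 * The write above is two-tiered: first attempt a no-fault atomic copy
 * while holding struct_mutex; only if it could not complete (the user
 * pages were not resident) drop the lock and fall back to the
 * faulting copy_from_user(), which is safe because the phys backing
 * store cannot move once assigned.
 */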
b962442e 4228
f787a5f5 4229void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 4230{
f787a5f5 4231 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e
EA
4232
4233 /* Clean up our request list when the client is going away, so that
4234 * later retire_requests won't dereference our soon-to-be-gone
4235 * file_priv.
4236 */
1c25595f 4237 spin_lock(&file_priv->mm.lock);
f787a5f5
CW
4238 while (!list_empty(&file_priv->mm.request_list)) {
4239 struct drm_i915_gem_request *request;
4240
4241 request = list_first_entry(&file_priv->mm.request_list,
4242 struct drm_i915_gem_request,
4243 client_list);
4244 list_del(&request->client_list);
4245 request->file_priv = NULL;
4246 }
1c25595f 4247 spin_unlock(&file_priv->mm.lock);
b962442e 4248}
31169714 4249
31169714 4250static int
1495f230 4251i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
31169714 4252{
17250b71
CW
4253 struct drm_i915_private *dev_priv =
4254 container_of(shrinker,
4255 struct drm_i915_private,
4256 mm.inactive_shrinker);
4257 struct drm_device *dev = dev_priv->dev;
6c085a72 4258 struct drm_i915_gem_object *obj;
1495f230 4259 int nr_to_scan = sc->nr_to_scan;
17250b71
CW
4260 int cnt;
4261
4262 if (!mutex_trylock(&dev->struct_mutex))
bbe2e11a 4263 return 0;
31169714 4264
6c085a72
CW
4265 if (nr_to_scan) {
4266 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4267 if (nr_to_scan > 0)
4268 i915_gem_shrink_all(dev_priv);
31169714
CW
4269 }
4270
17250b71 4271 cnt = 0;
6c085a72
CW
4272 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
4273 cnt += obj->base.size >> PAGE_SHIFT;
4274 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
4275 if (obj->pin_count == 0)
4276 cnt += obj->base.size >> PAGE_SHIFT;
17250b71 4277
17250b71 4278 mutex_unlock(&dev->struct_mutex);
6c085a72 4279 return cnt;
31169714 4280}
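/*
 * Shrinker notes: a sc->nr_to_scan of 0 is the core VM asking only
 * for a count of reclaimable pages.  mutex_trylock() matters here
 * because direct reclaim can be entered from our own allocations
 * while struct_mutex is already held; bailing out with 0 avoids
 * deadlocking against ourselves.  The count returned is every
 * unbound page plus every bound-but-unpinned page, i.e. an estimate
 * of what i915_gem_purge() and i915_gem_shrink_all() could release.
 */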