drm/i915: Pin backing pages for pread
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 4 Sep 2012 20:02:56 +0000 (21:02 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 20 Sep 2012 12:22:57 +0000 (14:22 +0200)
By using the recently introduced pinning of pages, we can safely drop
the mutex in the knowledge that the pages are not going to disappear
beneath us, and so we can simplify the code for iterating over the pages.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
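
For context, the structure the patch arrives at looks roughly like the sketch below: populate and pin the backing pages up front, index obj->pages[] directly in the copy loop, and drop struct_mutex around the user copy without taking a page reference. This is a condensed illustration, not the committed function; the name pread_sketch and the single unconditional copy per page (the real code keeps separate fast and slow paths via shmem_pread_fast()/shmem_pread_slow()) are simplifications, while the i915_gem_object_get_pages()/pin_pages()/unpin_pages() helpers are those introduced by the preceding page-pinning work.

static int
pread_sketch(struct drm_device *dev, struct drm_i915_gem_object *obj,
	     struct drm_i915_gem_pread *args)
{
	char __user *user_data = (char __user *)(uintptr_t)args->data_ptr;
	ssize_t remain = args->size;
	loff_t offset = args->offset;
	int ret;

	/* Ensure the backing store is populated, then pin it so the pages
	 * cannot be reaped while struct_mutex is dropped below. */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	while (remain > 0) {
		/* Pages are guaranteed to be present, so index the array
		 * directly instead of faulting each one in through
		 * shmem_read_mapping_page(). */
		struct page *page = obj->pages[offset >> PAGE_SHIFT];
		int page_offset = offset_in_page(offset);
		int page_length = min_t(ssize_t, remain,
					PAGE_SIZE - page_offset);
		char *vaddr = kmap(page);
		int unwritten;

		/* Safe to drop the mutex around the user copy: the pinned
		 * page cannot disappear beneath us. */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_to_user(user_data, vaddr + page_offset,
					 page_length);
		mutex_lock(&dev->struct_mutex);
		kunmap(page);

		if (unwritten) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unpin_pages(obj);
	return ret;
}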
drivers/gpu/drm/i915/i915_gem.c

index 73702e583eb986c61ab2fe1dee4ca63d8584e7df..26c8bf9c5fa6a3304a69489d8a073a9b8a80621e 100644
@@ -343,7 +343,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                                      page_length);
        kunmap_atomic(vaddr);
 
-       return ret;
+       return ret ? -EFAULT : 0;
 }
 
 static void
@@ -394,7 +394,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                                     page_length);
        kunmap(page);
 
-       return ret;
+       return ret ? -EFAULT : 0;
 }
 
 static int
@@ -403,7 +403,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_pread *args,
                     struct drm_file *file)
 {
-       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        char __user *user_data;
        ssize_t remain;
        loff_t offset;
@@ -412,7 +411,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int hit_slowpath = 0;
        int prefaulted = 0;
        int needs_clflush = 0;
-       int release_page;
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
@@ -433,6 +431,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
                }
        }
 
+       ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               return ret;
+
+       i915_gem_object_pin_pages(obj);
+
        offset = args->offset;
 
        while (remain > 0) {
@@ -448,18 +452,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
 
-               if (obj->pages) {
-                       page = obj->pages[offset >> PAGE_SHIFT];
-                       release_page = 0;
-               } else {
-                       page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-                       if (IS_ERR(page)) {
-                               ret = PTR_ERR(page);
-                               goto out;
-                       }
-                       release_page = 1;
-               }
-
+               page = obj->pages[offset >> PAGE_SHIFT];
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;
 
@@ -470,7 +463,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
                        goto next_page;
 
                hit_slowpath = 1;
-               page_cache_get(page);
                mutex_unlock(&dev->struct_mutex);
 
                if (!prefaulted) {
@@ -488,16 +480,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
                                       needs_clflush);
 
                mutex_lock(&dev->struct_mutex);
-               page_cache_release(page);
+
 next_page:
                mark_page_accessed(page);
-               if (release_page)
-                       page_cache_release(page);
 
-               if (ret) {
-                       ret = -EFAULT;
+               if (ret)
                        goto out;
-               }
 
                remain -= page_length;
                user_data += page_length;
@@ -505,6 +493,8 @@ next_page:
        }
 
 out:
+       i915_gem_object_unpin_pages(obj);
+
        if (hit_slowpath) {
                /* Fixup: Kill any reinstated backing storage pages */
                if (obj->madv == __I915_MADV_PURGED)