drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass
author: Chris Wilson <chris@chris-wilson.co.uk>
        Fri, 19 Aug 2016 15:54:28 +0000 (16:54 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
        Fri, 19 Aug 2016 16:13:36 +0000 (17:13 +0100)
Very old numbers indicate this is a 66% improvement when remapping the
entire object for fence contention - due to the elimination of
track_pfn_insert and its strcmp.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Testcase: igt/gem_fence_upload/performance
Testcase: igt/gem_mmap_gtt
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160819155428.1670-6-chris@chris-wilson.co.uk
drivers/gpu/drm/Makefile
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_mm.c [new file with mode: 0644]

index 0238bf8bc8c37458d06cb34903dff22a114ed6e2..3ff094171ee548e827f066e86f55867c1172c039 100644 (file)
@@ -46,7 +46,7 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
 obj-$(CONFIG_DRM_MGA)  += mga/
 obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I915)  += i915/
+obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
 obj-$(CONFIG_DRM_VC4)  += vc4/
 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
index 3412413408c0c3caf4a003ac1e8b71b23bcf4de5..a7da24640e88c531464d851533a316aacaf4c036 100644 (file)
@@ -12,6 +12,7 @@ subdir-ccflags-y += \
 i915-y := i915_drv.o \
          i915_irq.o \
          i915_memcpy.o \
+         i915_mm.o \
          i915_params.o \
          i915_pci.o \
           i915_suspend.o \
@@ -113,6 +114,6 @@ i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
 endif
 
-obj-$(CONFIG_DRM_I915)  += i915.o
+obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
index 016425c0b47573712f6b80ee28eb37f6dc02939c..9cd102cd931e0d5d29c4c8d24040c59cbbd52703 100644 (file)
@@ -3931,6 +3931,11 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
 
+/* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap);
+
 #define ptr_mask_bits(ptr) ({                                          \
        unsigned long __v = (unsigned long)(ptr);                       \
        (typeof(ptr))(__v & PAGE_MASK);                                 \
index 5398af7f7580bdb4399b98e47e58e40836fc50da..04607d4115d683beb9ba1f396d18307e575e7085 100644 (file)
@@ -1705,7 +1705,6 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
        struct i915_vma *vma;
        pgoff_t page_offset;
-       unsigned long pfn;
        unsigned int flags;
        int ret;
 
@@ -1790,48 +1789,13 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
                goto err_unpin;
 
        /* Finally, remap it using the new GTT offset */
-       pfn = ggtt->mappable_base + i915_ggtt_offset(vma);
-       pfn >>= PAGE_SHIFT;
-
-       if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
-               if (!obj->fault_mappable) {
-                       unsigned long size =
-                               min_t(unsigned long,
-                                     area->vm_end - area->vm_start,
-                                     obj->base.size) >> PAGE_SHIFT;
-                       unsigned long base = area->vm_start;
-                       int i;
-
-                       for (i = 0; i < size; i++) {
-                               ret = vm_insert_pfn(area,
-                                                   base + i * PAGE_SIZE,
-                                                   pfn + i);
-                               if (ret)
-                                       break;
-                       }
-               } else
-                       ret = vm_insert_pfn(area,
-                                           (unsigned long)vmf->virtual_address,
-                                           pfn + page_offset);
-       } else {
-               /* Overriding existing pages in partial view does not cause
-                * us any trouble as TLBs are still valid because the fault
-                * is due to userspace losing part of the mapping or never
-                * having accessed it before (at this partials' range).
-                */
-               const struct i915_ggtt_view *view = &vma->ggtt_view;
-               unsigned long base = area->vm_start +
-                       (view->params.partial.offset << PAGE_SHIFT);
-               unsigned int i;
-
-               for (i = 0; i < view->params.partial.size; i++) {
-                       ret = vm_insert_pfn(area,
-                                           base + i * PAGE_SIZE,
-                                           pfn + i);
-                       if (ret)
-                               break;
-               }
-       }
+       ret = remap_io_mapping(area,
+                              area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
+                              (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+                              min_t(u64, vma->size, area->vm_end - area->vm_start),
+                              &ggtt->mappable);
+       if (ret)
+               goto err_unpin;
 
        obj->fault_mappable = true;
 err_unpin:
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
new file mode 100644 (file)
index 0000000..e4935dd
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/io-mapping.h>
+
+#include <asm/pgtable.h>
+
+#include "i915_drv.h"
+
+/*
+ * Walk state threaded through the apply_to_page_range() callback below:
+ * the target mm, the next pfn to insert, and the page protection bits
+ * to apply to every PTE.
+ */
+struct remap_pfn {
+       struct mm_struct *mm;
+       unsigned long pfn;
+       pgprot_t prot;
+};
+
+/*
+ * Per-PTE callback for apply_to_page_range(): write one special PTE
+ * for the current pfn, then advance to the next page. Always succeeds
+ * (returns 0), so the walk only stops at the end of the range.
+ */
+static int remap_pfn(pte_t *pte, pgtable_t token,
+                    unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+
+       /* Special PTE are not associated with any struct page */
+       set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+       r->pfn++;
+
+       return 0;
+}
+
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ *  Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap)
+{
+       struct remap_pfn r;
+       int err;
+
+       /* The caller must have set the vma up as an I/O pfn mapping. */
+       GEM_BUG_ON((vma->vm_flags &
+                   (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) !=
+                  (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP));
+
+       /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+       r.mm = vma->vm_mm;
+       r.pfn = pfn;
+       /* Cache attribute bits come from the io_mapping, the rest from the vma. */
+       r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+                         (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+       err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+       if (unlikely(err)) {
+               /* Unwind: remove the (r.pfn - pfn) PTEs inserted before the failure. */
+               zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+               return err;
+       }
+
+       return 0;
+}