drm/i915: Prevent writing into a read-only object via a GGTT mmap
author Chris Wilson <chris@chris-wilson.co.uk>
Thu, 12 Jul 2018 18:53:13 +0000 (19:53 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Nov 2019 18:18:49 +0000 (19:18 +0100)
commit 3e977ac6179b39faa3c0eda5fce4f00663ae298d upstream.

If the user has created a read-only object, they should not be allowed
to circumvent the write protection by using a GGTT mmapping. Deny it.

Also most machines do not support read-only GGTT PTEs, so again we have
to reject attempted writes. Fortunately, this is known a priori, so we
can at least reject in the call to create the mmap (with a sanity check
in the fault handler).

v2: Check the vma->vm_flags during mmap() to allow readonly access.
v3: Remove VM_MAYWRITE to curtail mprotect()
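
In terms of the sketch above: the refused PROT_WRITE mmap corresponds to the v2 vm_flags check at mmap() time, while the refused mprotect() call is the effect of the v3 removal of VM_MAYWRITE, which stops a read-only mapping from being upgraded to writable after the fact.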

Testcase: igt/gem_userptr_blits/readonly_mmap*
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com> #v1
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180712185315.3288-4-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/intel_ringbuffer.c
include/drm/drm_vma_manager.h

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c55f338e380b9d0d86da89012cd9109689ff6bd2..d2c042af36b8d160c1fbdc6d35147b772c4957a5 100644
@@ -1035,6 +1035,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
                return -EACCES;
        }
 
+       if (node->readonly) {
+               if (vma->vm_flags & VM_WRITE) {
+                       drm_gem_object_put_unlocked(obj);
+                       return -EINVAL;
+               }
+
+               vma->vm_flags &= ~VM_MAYWRITE;
+       }
+
        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 727018a16cca24ea1d95e20e2c55988ca40d2de8..f95e2c8ac239a77c942f5fb783574f8a7593e92a 100644
@@ -1834,6 +1834,10 @@ int i915_gem_fault(struct vm_fault *vmf)
        unsigned int flags;
        int ret;
 
+       /* Sanity check that we allow writing into this object */
+       if (i915_gem_object_is_readonly(obj) && write)
+               return VM_FAULT_SIGBUS;
+
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 920d064e9b53ee9aca19f55c7168dbc642e99171..6f5e1e18e530afa5a765635966ef1284a23a15fe 100644
@@ -209,7 +209,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 
        /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
@@ -2105,8 +2105,10 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
        const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
        dma_addr_t addr;
 
-       /* The GTT does not support read-only mappings */
-       GEM_BUG_ON(flags & PTE_READ_ONLY);
+       /*
+        * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+        * not to allow the user to override access to a read only page.
+        */
 
        gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
        gtt_entries += vma->node.start >> PAGE_SHIFT;
@@ -2354,7 +2356,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 
        /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
        pte_flags = 0;
-       if (obj->gt_ro)
+       if (i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
 
        intel_runtime_pm_get(i915);
@@ -2396,7 +2398,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
        /* Currently applicable only to VLV */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
        if (flags & I915_VMA_LOCAL_BIND) {
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index c30d8f80818587bfe4500a08b377b2ac45836f68..39cfe04dcdb8bb7c09126d8087d195c0617a6e0b 100644
@@ -140,7 +140,6 @@ struct drm_i915_gem_object {
         * Is the object to be mapped as read-only to the GPU
         * Only honoured if hardware has relevant pte bit
         */
-       unsigned long gt_ro:1;
        unsigned int cache_level:3;
        unsigned int cache_coherent:2;
 #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
@@ -313,6 +312,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
        reservation_object_unlock(obj->resv);
 }
 
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+       obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+       return obj->base.vma_node.readonly;
+}
+
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 19fe2489065a41e39e6382c24abcde269d7b203a..63667a5c2c871be83bfe37b19a832a607c4625a9 100644
@@ -1373,7 +1373,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
         * if supported by the platform's GGTT.
         */
        if (vm->has_read_only)
-               obj->gt_ro = 1;
+               i915_gem_object_set_readonly(obj);
 
        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma))
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index d84d52f6d2b119bd450af591eb2ccde38908ba15..b54c98f05460b9732647d7bc98e1b2df9b50f515 100644
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
        rwlock_t vm_lock;
        struct drm_mm_node vm_node;
        struct rb_root vm_files;
+       bool readonly:1;
 };
 
 struct drm_vma_offset_manager {