ANDROID: staging: ion: move buffer kmap from begin/end_cpu_access()
authorHridya Valsaraju <hridya@google.com>
Mon, 26 Jul 2021 03:49:06 +0000 (20:49 -0700)
committerPDO SCM Team <hudsoncm@motorola.com>
Mon, 8 Nov 2021 03:42:54 +0000 (21:42 -0600)
Since dma_buf_begin/end_cpu_access() calls always used to bracket
dma_buf_kmap/kunmap calls, ION performed kmap/kunmap invocations for the
buffer during dma_buf_begin/end_cpu_access() calls and cached the
results with a kmap counter.
However, dma_buf_begin/end_cpu_access() invocations can be
triggered from userspace using the DMA_BUF_IOCTL_SYNC ioctl as well.
This means that a mapping that was created by a device driver using a
dma_buf_kmap() call or an ion_map_kernel() call could be unmapped
from userspace if a client accidentally (or maliciously) invoked
DMA_BUF_IOCTL_SYNC IOCTL with 'DMA_BUF_SYNC_END' argument since this
would in turn invoke dma_buf_end_cpu_access(), which would then decrement
the kmap counter and invoke kunmap() when the counter gets to 0.

This patch moves the kmap/kunmap operations from the
begin/end_cpu_access() DMA-BUF ops to the map/unmap DMA-BUF ops to
prevent the issue.

Mot-CRs-fixed: (CR)
CVE-Fixed: CVE-2021-0929

Bug: 187527909
Change-Id: I00dc8eefefb1f3aab99e770f90d624011f7740f0
[hridya: minor conflicts during cherry-picking]
Signed-off-by: Hridya Valsaraju <hridya@google.com>
Signed-off-by: Gajjala Chakradhar <gajjalac@motorola.com>
Reviewed-on: https://gerrit.mot.com/2098202
SLTApproved: Slta Waiver
SME-Granted: SME Approvals Granted
Tested-by: Jira Key
Reviewed-by: Xiangpo Zhao <zhaoxp3@motorola.com>
Submit-Approved: Jira Key
(cherry picked from commit fb16b88a7e0788e2e32bedd952883e7a8831c941)

drivers/staging/android/ion/ion.c

index a61ad765a540b5b1b084bf158de261c91238ca93..59a8824d73d95e462ce26d2bd7e0629996f0a43f 100644 (file)
@@ -335,13 +335,34 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
 {
        struct ion_buffer *buffer = dmabuf->priv;
+       void *vaddr;
+
+       if (!buffer->heap->ops->map_kernel) {
+               pr_err("%s: map kernel is not implemented by this heap.\n",
+                      __func__);
+               return ERR_PTR(-ENOTTY);
+       }
+       mutex_lock(&buffer->lock);
+       vaddr = ion_buffer_kmap_get(buffer);
+       mutex_unlock(&buffer->lock);
+
+       if (IS_ERR(vaddr))
+               return vaddr;
 
-       return buffer->vaddr + offset * PAGE_SIZE;
+       return vaddr + offset * PAGE_SIZE;
 }
 
 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
 {
+       struct ion_buffer *buffer = dmabuf->priv;
+
+       if (buffer->heap->ops->map_kernel) {
+               mutex_lock(&buffer->lock);
+               ion_buffer_kmap_put(buffer);
+               mutex_unlock(&buffer->lock);
+       }
+
 }
 
 static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
@@ -373,18 +394,8 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
 {
        struct ion_buffer *buffer = dmabuf->priv;
-       void *vaddr;
        struct dma_buf_attachment *att;
 
-       /*
-        * TODO: Move this elsewhere because we don't always need a vaddr
-        */
-       if (buffer->heap->ops->map_kernel) {
-               mutex_lock(&buffer->lock);
-               vaddr = ion_buffer_kmap_get(buffer);
-               mutex_unlock(&buffer->lock);
-       }
-
        mutex_lock(&dmabuf->lock);
        list_for_each_entry(att, &dmabuf->attachments, node) {
                struct sg_table *table = att->priv;
@@ -403,12 +414,6 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
        struct ion_buffer *buffer = dmabuf->priv;
        struct dma_buf_attachment *att;
 
-       if (buffer->heap->ops->map_kernel) {
-               mutex_lock(&buffer->lock);
-               ion_buffer_kmap_put(buffer);
-               mutex_unlock(&buffer->lock);
-       }
-
        mutex_lock(&dmabuf->lock);
        list_for_each_entry(att, &dmabuf->attachments, node) {
                struct sg_table *table = att->priv;