From bbe661ab6ce7d7f950a51510945b499fbebd49dd Mon Sep 17 00:00:00 2001
From: Hridya Valsaraju
Date: Sun, 25 Jul 2021 20:49:06 -0700
Subject: [PATCH] ANDROID: staging: ion: move buffer kmap from begin/end_cpu_access()

Since dma_buf_begin/end_cpu_access() calls always used to bracket
dma_buf_kmap/kunmap() calls, ION performed the kmap/kunmap of the
buffer during dma_buf_begin/end_cpu_access() and cached the result
with a kmap counter.

However, dma_buf_begin/end_cpu_access() can also be triggered from
userspace via the DMA_BUF_IOCTL_SYNC ioctl. This means that a mapping
created by a device driver with a dma_buf_kmap() or ion_map_kernel()
call could be unmapped from userspace: if a client accidentally (or
maliciously) invoked DMA_BUF_IOCTL_SYNC with the DMA_BUF_SYNC_END
argument, it would in turn invoke dma_buf_end_cpu_access(), which
would decrement the kmap counter and call kunmap() once the counter
reached 0.

Move the kmap/kunmap operations from the begin/end_cpu_access()
DMA-BUF ops to the map/unmap DMA-BUF ops to prevent the issue.

Mot-CRs-fixed: (CR)
CVE-Fixed: CVE-2021-0929
Bug: 187527909
Change-Id: I00dc8eefefb1f3aab99e770f90d624011f7740f0
[hridya: minor conflicts during cherry-picking]
Signed-off-by: Hridya Valsaraju
Signed-off-by: Gajjala Chakradhar
Reviewed-on: https://gerrit.mot.com/2098202
SLTApproved: Slta Waiver
SME-Granted: SME Approvals Granted
Tested-by: Jira Key
Reviewed-by: Xiangpo Zhao
Submit-Approved: Jira Key
(cherry picked from commit fb16b88a7e0788e2e32bedd952883e7a8831c941)
---
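Note (below the "---" marker, so ignored by git am and not part of the
commit): the userspace trigger described above needs nothing more than
the standard DMA-BUF sync UAPI. A minimal, illustrative sketch follows;
the helper name drop_ion_kmap() is made up for illustration, fd is
assumed to be a dma-buf file descriptor shared with the client, and the
snippet is not part of this patch.

	#include <linux/dma-buf.h>
	#include <sys/ioctl.h>

	/*
	 * End a CPU-access window that was never begun. Before this
	 * patch, each such call still reached dma_buf_end_cpu_access()
	 * and decremented ION's kmap counter, eventually kunmap()ing a
	 * buffer another kernel driver was still using.
	 */
	static int drop_ion_kmap(int fd)
	{
		struct dma_buf_sync sync = {
			.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW,
		};

		return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	}
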
 drivers/staging/android/ion/ion.c | 39 +++++++++++++++++--------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a61ad765a540..59a8824d73d9 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -335,13 +335,34 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return ERR_PTR(-ENOTTY);
+	}
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+
+	if (IS_ERR(vaddr))
+		return vaddr;
 
-	return buffer->vaddr + offset * PAGE_SIZE;
+	return vaddr + offset * PAGE_SIZE;
 }
 
 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
 			       void *ptr)
 {
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		ion_buffer_kmap_put(buffer);
+		mutex_unlock(&buffer->lock);
+	}
+
 }
 
 static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
@@ -373,18 +394,8 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 					enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct dma_buf_attachment *att;
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	mutex_lock(&dmabuf->lock);
 	list_for_each_entry(att, &dmabuf->attachments, node) {
 		struct sg_table *table = att->priv;
@@ -403,12 +414,6 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	struct ion_buffer *buffer = dmabuf->priv;
 	struct dma_buf_attachment *att;
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	mutex_lock(&dmabuf->lock);
 	list_for_each_entry(att, &dmabuf->attachments, node) {
 		struct sg_table *table = att->priv;
-- 
2.20.1
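
Reviewer note (appended after the patch, not part of the commit): the
fix leans on ION's existing kmap refcounting. For readers without the
tree handy, the helpers behave roughly like the sketch below; this is
a paraphrase for context, not code from this diff, and the exact error
handling in ion.c differs slightly.

	/*
	 * Map the buffer into the kernel on first use and cache the
	 * result; later callers only bump the counter.
	 */
	static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
	{
		void *vaddr;

		if (buffer->kmap_cnt) {
			buffer->kmap_cnt++;
			return buffer->vaddr;
		}
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(vaddr))
			return vaddr ? vaddr : ERR_PTR(-EINVAL);
		buffer->vaddr = vaddr;
		buffer->kmap_cnt++;
		return vaddr;
	}

	/*
	 * Drop one reference; the buffer is unmapped only when the
	 * last kernel user is gone.
	 */
	static void ion_buffer_kmap_put(struct ion_buffer *buffer)
	{
		if (!--buffer->kmap_cnt) {
			buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
			buffer->vaddr = NULL;
		}
	}

With the get/put now issued only from the dma-buf map/unmap ops, a
DMA_BUF_IOCTL_SYNC call from userspace can no longer drop a kmap
reference it never took.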