android: ion: add nozero and sync_force flags
authorCho KyongHo <pullip.cho@samsung.com>
Fri, 9 Feb 2018 07:18:43 +0000 (16:18 +0900)
committerSangwook Ju <sw.ju@samsung.com>
Mon, 14 May 2018 10:45:23 +0000 (19:45 +0900)
Allocation with ION_FLAG_NOZEROED returns buffers without clearing data
written by previous users. For security reasons, ION must not allow
userspace to map such a buffer, since it may leak stale data.
ION_FLAG_SYNC_FORCE is also introduced for users that want to decide
the point of cache maintenance themselves. With this flag, ION performs
a cache flush on the allocated buffer even when it is cached. These
users want the extra flush to prevent buffer corruption by dirty cache
lines covering the buffer being evicted after a DMA write that is not
followed by proper cache maintenance.
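
For illustration, a minimal userspace sketch of allocating with the new
flags through the legacy ION ioctl interface. The uapi header path, the
device node and the heap mask are assumptions made for the example; the
flag values themselves come from the uapi change below:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/ion.h>  /* assumed install path of the ION uapi header */

    int ion_fd = open("/dev/ion", O_RDWR);  /* assumed device node */
    struct ion_allocation_data data = {
            .len = 4096,
            .heap_id_mask = 1,  /* assumption: the system heap is heap id 0 */
            /* cached buffer, but flushed at allocation time so a device
             * can DMA into it before any CPU-side cache maintenance */
            .flags = ION_FLAG_CACHED | ION_FLAG_SYNC_FORCE,
    };
    int ret = ioctl(ion_fd, ION_IOC_ALLOC, &data);
    /* on success, data.fd holds the dma-buf fd of the new buffer */

Passing ION_FLAG_NOZEROED instead skips both the memset() path in the
CMA heap and __GFP_ZERO in the page pool; mmap() of such a buffer is
refused, as the first hunk below shows.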

Change-Id: Ib3c4f9c93a6cd0240ae12b8c061142355e2b1605
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion.h
drivers/staging/android/ion/ion_cma_heap.c
drivers/staging/android/ion/ion_page_pool.c
drivers/staging/android/ion/ion_system_heap.c
drivers/staging/android/uapi/ion.h

index 3c3c5ffcbad80c5e093cbb3245a7c2168420efee..79522c7735ad868ae570077222354bcbd1ec53af 100644 (file)
@@ -295,6 +295,12 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
                return -EINVAL;
        }
 
+       if ((buffer->flags & ION_FLAG_NOZEROED) != 0) {
+               pr_err("%s: mmap() to nozeroed buffer is not allowed\n",
+                      __func__);
+               return -EACCES;
+       }
+
        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
index 6936bef7175331871c7c348fe4f830554c0b34aa..69f3ee43384ae908de80011fa4b708d5f28d42f1 100644 (file)
@@ -341,7 +341,7 @@ struct ion_page_pool {
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
                                           bool cached);
 void ion_page_pool_destroy(struct ion_page_pool *pool);
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool nozero);
 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
 
 /** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
index 1437078cf9c6f682fb4119911d399396660c4475..62c5b4b860e52734c564ed6d540c63b6b78a5b52 100644 (file)
@@ -46,6 +46,8 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
        unsigned long size = PAGE_ALIGN(len);
        unsigned long nr_pages = size >> PAGE_SHIFT;
        unsigned long align = cma_heap->align_order;
+       bool cacheflush = !(flags & ION_FLAG_CACHED) ||
+                         ((flags & ION_FLAG_SYNC_FORCE) != 0);
        int ret = -ENOMEM;
 
        pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
@@ -55,20 +57,22 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                return -ENOMEM;
        }
 
-       if (PageHighMem(pages)) {
-               unsigned long nr_clear_pages = nr_pages;
-               struct page *page = pages;
-
-               while (nr_clear_pages > 0) {
-                       void *vaddr = kmap_atomic(page);
-
-                       memset(vaddr, 0, PAGE_SIZE);
-                       kunmap_atomic(vaddr);
-                       page++;
-                       nr_clear_pages--;
+       if (!(flags & ION_FLAG_NOZEROED)) {
+               if (PageHighMem(pages)) {
+                       unsigned long nr_clear_pages = nr_pages;
+                       struct page *page = pages;
+
+                       while (nr_clear_pages > 0) {
+                               void *vaddr = kmap_atomic(page);
+
+                               memset(vaddr, 0, PAGE_SIZE);
+                               kunmap_atomic(vaddr);
+                               page++;
+                               nr_clear_pages--;
+                       }
+               } else {
+                       memset(page_address(pages), 0, size);
                }
-       } else {
-               memset(page_address(pages), 0, size);
        }
 
        table = kmalloc(sizeof(*table), GFP_KERNEL);
@@ -84,7 +88,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
        buffer->priv_virt = pages;
        buffer->sg_table = table;
 
-       if (!(flags & ION_FLAG_CACHED))
+       if (cacheflush)
                __flush_dcache_area(page_to_virt(pages), len);
 
        return 0;
index 184fc29417f73d6567d438c85eef8db1a908d82b..98df137319c0c0c575b0a9c0eb040fcb78b8a6dc 100644 (file)
 
 #include "ion.h"
 
-static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool, bool nozero)
 {
-       struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+       gfp_t gfpmask = pool->gfp_mask;
+       struct page *page;
+
+       if (nozero)
+               gfpmask &= ~__GFP_ZERO;
 
+       page = alloc_pages(gfpmask, pool->order);
        if (!page)
                return NULL;
        return page;
@@ -74,7 +79,7 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
        return page;
 }
 
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool nozero)
 {
        struct page *page = NULL;
 
@@ -88,7 +93,7 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
        mutex_unlock(&pool->mutex);
 
        if (!page) {
-               page = ion_page_pool_alloc_pages(pool);
+               page = ion_page_pool_alloc_pages(pool, nozero);
                if (!pool->cached)
                        __flush_dcache_area(page_to_virt(page),
                                            1 << (PAGE_SHIFT + pool->order));
index 09ae4bca428e946e339b6a9c480ef584953e6ae2..b5c3bce9feb361443aae958918c1ef8890d18487 100644 (file)
@@ -64,15 +64,17 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      unsigned long order)
 {
        bool cached = ion_buffer_cached(buffer);
+       bool nozero = buffer->flags & ION_FLAG_NOZEROED;
+       bool cleancache = buffer->flags & ION_FLAG_SYNC_FORCE;
        struct ion_page_pool *pool;
        struct page *page;
 
-       if (!cached)
+       if (!cached || cleancache)
                pool = heap->uncached_pools[order_to_index(order)];
        else
                pool = heap->cached_pools[order_to_index(order)];
 
-       page = ion_page_pool_alloc(pool);
+       page = ion_page_pool_alloc(pool, nozero);
 
        return page;
 }
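Note that routing ION_FLAG_SYNC_FORCE allocations through the uncached
pools is what produces the forced flush in the system heap: as the
ion_page_pool hunk above shows, pages freshly allocated for a non-cached
pool are flushed with __flush_dcache_area(), and pages recycled through
such a pool are presumably already clean.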
index 9e21451149d06fed279289d87c3a7d29163a6ed9..f6dece538655a6ebc66ff329a96d881d057a8de8 100644 (file)
@@ -56,6 +56,18 @@ enum ion_heap_type {
  * when the buffer is mapped for dma
  */
 #define ION_FLAG_CACHED 1
+/*
+ * the allocated buffer is not zeroed, to avoid initialization overhead.
+ * Mapping such a buffer to userspace is not allowed.
+ */
+#define ION_FLAG_NOZEROED 8
+/*
+ * the allocated buffer has no dirty cache lines. In other words, ION
+ * flushes the cache even though the allocation flags include
+ * ION_FLAG_CACHED. This is required by some H/W drivers that want to
+ * reduce overhead by performing cache maintenance explicitly themselves.
+ */
+#define ION_FLAG_SYNC_FORCE 32
 
 /**
  * DOC: Ion Userspace API