return -EINVAL;
}
+ if ((buffer->flags & ION_FLAG_NOZEROED) != 0) {
+ pr_err("%s: mmap() to nozeroed buffer is not allowed\n",
+ __func__);
+ return -EACCES;
+ }
+
if (!(buffer->flags & ION_FLAG_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
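Note: with the check above in place, a userspace mmap() of a dma-buf backed by a nozeroed buffer is expected to fail with EACCES. A minimal caller-side sketch, assuming a hypothetical fd that is a dma-buf exported from an ION_FLAG_NOZEROED allocation of size len (not part of this patch):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

/* Assumption: 'fd' refers to a dma-buf whose backing buffer was allocated
 * with ION_FLAG_NOZEROED, so the ion_mmap() check above rejects the mapping. */
static int try_map_nozeroed(int fd, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED) {
		int err = errno;	/* EACCES expected with this patch applied */

		perror("mmap");
		return -err;
	}

	/* Not expected for a nozeroed buffer; unmap and report success. */
	munmap(p, len);
	return 0;
}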
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
bool cached);
void ion_page_pool_destroy(struct ion_page_pool *pool);
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool nozero);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
unsigned long size = PAGE_ALIGN(len);
unsigned long nr_pages = size >> PAGE_SHIFT;
unsigned long align = cma_heap->align_order;
+ bool cacheflush = !(flags & ION_FLAG_CACHED) ||
+ ((flags & ION_FLAG_SYNC_FORCE) != 0);
int ret = -ENOMEM;
pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
return -ENOMEM;
}
- if (PageHighMem(pages)) {
- unsigned long nr_clear_pages = nr_pages;
- struct page *page = pages;
-
- while (nr_clear_pages > 0) {
- void *vaddr = kmap_atomic(page);
-
- memset(vaddr, 0, PAGE_SIZE);
- kunmap_atomic(vaddr);
- page++;
- nr_clear_pages--;
+ if (!(flags & ION_FLAG_NOZEROED)) {
+ if (PageHighMem(pages)) {
+ unsigned long nr_clear_pages = nr_pages;
+ struct page *page = pages;
+
+ while (nr_clear_pages > 0) {
+ void *vaddr = kmap_atomic(page);
+
+ memset(vaddr, 0, PAGE_SIZE);
+ kunmap_atomic(vaddr);
+ page++;
+ nr_clear_pages--;
+ }
+ } else {
+ memset(page_address(pages), 0, size);
}
- } else {
- memset(page_address(pages), 0, size);
}
table = kmalloc(sizeof(*table), GFP_KERNEL);
buffer->priv_virt = pages;
buffer->sg_table = table;
- if (!(flags & ION_FLAG_CACHED))
+ if (cacheflush)
__flush_dcache_area(page_to_virt(pages), len);
return 0;
#include "ion.h"
-static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool, bool nozero)
{
- struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+ gfp_t gfpmask = pool->gfp_mask;
+ struct page *page;
+
+ if (nozero)
+ gfpmask &= ~__GFP_ZERO;
+ page = alloc_pages(gfpmask, pool->order);
if (!page)
return NULL;
return page;
return page;
}
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool nozero)
{
struct page *page = NULL;
mutex_unlock(&pool->mutex);
if (!page) {
- page = ion_page_pool_alloc_pages(pool);
+ page = ion_page_pool_alloc_pages(pool, nozero);
if (!pool->cached)
__flush_dcache_area(page_to_virt(page),
1 << (PAGE_SHIFT + pool->order));
unsigned long order)
{
bool cached = ion_buffer_cached(buffer);
+ bool nozero = buffer->flags & ION_FLAG_NOZEROED;
+ bool cleancache = buffer->flags & ION_FLAG_SYNC_FORCE;
struct ion_page_pool *pool;
struct page *page;
- if (!cached)
+ if (!cached || cleancache)
pool = heap->uncached_pools[order_to_index(order)];
else
pool = heap->cached_pools[order_to_index(order)];
- page = ion_page_pool_alloc(pool);
+ page = ion_page_pool_alloc(pool, nozero);
return page;
}
* when the buffer is mapped for dma
*/
#define ION_FLAG_CACHED 1
+/*
+ * the allocated buffer is not zeroed, to avoid initialization overhead.
+ * Mapping such a buffer to userspace is not allowed.
+ */
+#define ION_FLAG_NOZEROED 8
+/*
+ * the allocated buffer does not have any dirty cache lines allocated for it.
+ * In other words, ION flushes the cache even though the allocation flags
+ * include ION_FLAG_CACHED. This is required by some H/W drivers that want to
+ * reduce overhead by explicit cache maintenance.
+ */
+#define ION_FLAG_SYNC_FORCE 32
/**
* DOC: Ion Userspace API
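For reference, a sketch of how a client might request the new flags through ION_IOC_ALLOC. It assumes the post-4.12 staging ION uapi (struct ion_allocation_data returning a dma-buf fd directly) and that the patched uapi header exporting ION_FLAG_SYNC_FORCE / ION_FLAG_NOZEROED is reachable as <linux/ion.h>; the heap_id_mask value is a placeholder, not something defined by this patch.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ion.h>	/* assumption: patched uapi header providing the new flags */

/*
 * Allocate a cached buffer that is nevertheless returned without dirty
 * cache lines (ION_FLAG_SYNC_FORCE). Returns a dma-buf fd or -1 on error.
 */
static int alloc_clean_cached(size_t len)
{
	struct ion_allocation_data data;
	int ret, ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.len = len;
	data.heap_id_mask = 1;	/* placeholder: query the device for real heap IDs */
	data.flags = ION_FLAG_CACHED | ION_FLAG_SYNC_FORCE;

	ret = ioctl(ion_fd, ION_IOC_ALLOC, &data);
	close(ion_fd);

	return ret < 0 ? -1 : (int)data.fd;
}

Adding ION_FLAG_NOZEROED to data.flags would additionally skip the zeroing pass, at the cost of the mmap() restriction shown in the first hunk.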