obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
obj-$(CONFIG_ION_TEST) += ion_test.o
-obj-$(CONFIG_ION_EXYNOS) += ion_fdt_exynos.o
+obj-$(CONFIG_ION_EXYNOS) += ion_fdt_exynos.o ion_buffer_protect.o
return -EACCES;
}
+ if ((buffer->flags & ION_FLAG_PROTECTED) != 0) {
+ pr_err("%s: mmap() to protected buffer is not allowed\n",
+ __func__);
+ return -EACCES;
+ }
+
if (!(buffer->flags & ION_FLAG_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
phys_addr_t base;
size_t size;
phys_addr_t align;
+ bool secure;
bool untouchable;
};
--- /dev/null
+/*
+ * drivers/staging/android/ion/ion_buffer_protect.c
+ *
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "ion_exynos.h"
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+
+#define ION_SECURE_DMA_BASE 0x80000000
+#define ION_SECURE_DMA_END 0xE0000000
+
+static struct gen_pool *secure_iova_pool;
+static DEFINE_SPINLOCK(siova_pool_lock);
+
+static int ion_secure_iova_alloc(unsigned long *addr, unsigned long size,
+ unsigned int align)
+{
+ unsigned long out_addr;
+
+ if (!secure_iova_pool) {
+ pr_err("%s: Secure IOVA pool is not created\n", __func__);
+ return -ENODEV;
+ }
+
+ spin_lock(&siova_pool_lock);
+ if (align > PAGE_SIZE) {
+ gen_pool_set_algo(secure_iova_pool,
+ find_first_fit_with_align, &align);
+ out_addr = gen_pool_alloc(secure_iova_pool, size);
+ gen_pool_set_algo(secure_iova_pool, NULL, NULL);
+ } else {
+ out_addr = gen_pool_alloc(secure_iova_pool, size);
+ }
+ spin_unlock(&siova_pool_lock);
+
+ if (out_addr == 0) {
+ pr_err("%s: failed alloc secure iova. %zu/%zu bytes used\n",
+ __func__, gen_pool_avail(secure_iova_pool),
+ gen_pool_size(secure_iova_pool));
+ return -ENOMEM;
+ }
+
+ *addr = out_addr;
+
+ return 0;
+}
+
+void ion_secure_iova_free(unsigned long addr, unsigned long size)
+{
+ if (!secure_iova_pool) {
+ pr_err("%s: Secure IOVA pool is not created\n", __func__);
+ return;
+ }
+
+ spin_lock(&siova_pool_lock);
+ gen_pool_free(secure_iova_pool, addr, size);
+ spin_unlock(&siova_pool_lock);
+}
+
+int __init ion_secure_iova_pool_create(void)
+{
+ secure_iova_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!secure_iova_pool) {
+ pr_err("%s: failed to create Secure IOVA pool\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (gen_pool_add(secure_iova_pool, ION_SECURE_DMA_BASE,
+ ION_SECURE_DMA_END - ION_SECURE_DMA_BASE, -1)) {
+ pr_err("%s: failed to set address range of Secure IOVA pool\n",
+ __func__);
+ gen_pool_destroy(secure_iova_pool);
+ secure_iova_pool = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ion_secure_protect(struct ion_buffer_prot_info *protdesc,
+ unsigned int protalign)
+{
+ unsigned long size = protdesc->chunk_count * protdesc->chunk_size;
+ unsigned long dma_addr = 0;
+ drmdrv_result_t drmret = DRMDRV_OK;
+ int ret;
+
+ ret = ion_secure_iova_alloc(&dma_addr, size,
+ max_t(u32, protalign, PAGE_SIZE));
+ if (ret)
+ goto err_iova;
+
+ protdesc->dma_addr = (unsigned int)dma_addr;
+
+ __flush_dcache_area(protdesc, sizeof(*protdesc));
+ if (protdesc->chunk_count > 1)
+ __flush_dcache_area(phys_to_virt(protdesc->bus_address),
+ sizeof(unsigned long) * protdesc->chunk_count);
+
+ drmret = exynos_smc(SMC_DRM_PPMP_PROT, virt_to_phys(protdesc), 0, 0);
+ if (drmret != DRMDRV_OK) {
+ ret = -EACCES;
+ goto err_smc;
+ }
+
+ return 0;
+err_smc:
+ ion_secure_iova_free(dma_addr, size);
+err_iova:
+ pr_err("%s: PROT:%d (err=%d,va=%#lx,len=%#lx,cnt=%u,flg=%u)\n",
+ __func__, SMC_DRM_PPMP_PROT, drmret, dma_addr, size,
+ protdesc->chunk_count, protdesc->flags);
+
+ return ret;
+}
+
+static int ion_secure_unprotect(struct ion_buffer_prot_info *protdesc)
+{
+ unsigned long size = protdesc->chunk_count * protdesc->chunk_size;
+ int ret;
+ /*
+ * No need to flush protdesc for unprotection because it is never
+ * modified since the buffer is protected.
+ */
+ ret = exynos_smc(SMC_DRM_PPMP_UNPROT, virt_to_phys(protdesc), 0, 0);
+
+ ion_secure_iova_free(protdesc->dma_addr, size);
+
+ if (ret != DRMDRV_OK) {
+ pr_err("%s: UNPROT:%d(err=%d,va=%#lx,len=%#lx,cnt=%u,flg=%u)\n",
+ __func__, SMC_DRM_PPMP_UNPROT, ret, protdesc->dma_addr,
+ size, protdesc->chunk_count, protdesc->flags);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+#else /* !CONFIG_EXYNOS_CONTENT_PATH_PROTECTION */
+
+static int ion_secure_protect(struct ion_buffer_prot_info *prot,
+ unsigned int protalign)
+{
+ return -ENODEV;
+}
+
+static int ion_secure_unprotect(struct ion_buffer_prot_info *prot)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_EXYNOS_CONTENT_PATH_PROTECTION */
+
+void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
+ unsigned long phys, unsigned int protalign)
+{
+ struct ion_buffer_prot_info *protdesc;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION))
+ return NULL;
+
+ protdesc = kmalloc(sizeof(*protdesc), GFP_KERNEL);
+ if (!protdesc)
+ return ERR_PTR(-ENOMEM);
+
+ protdesc->chunk_count = 1;
+ protdesc->flags = protection_id;
+ protdesc->chunk_size = size;
+ protdesc->bus_address = phys;
+
+ ret = ion_secure_protect(protdesc, protalign);
+ if (ret) {
+ kfree(protdesc);
+ return ERR_PTR(ret);
+ }
+
+ return protdesc;
+}
+
+void ion_buffer_unprotect(void *priv)
+{
+ struct ion_buffer_prot_info *protdesc = priv;
+
+ if (priv) {
+ ion_secure_unprotect(protdesc);
+ kfree(protdesc);
+ }
+}
#include <asm/cacheflush.h>
#include "ion.h"
+#include "ion_exynos.h"
#define ION_CARVEOUT_ALLOCATE_FAIL -1
phys_addr_t base;
size_t size;
size_t alloc_align;
+ unsigned int protection_id;
+ bool secure;
bool untouchable;
};
-static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+static phys_addr_t ion_carveout_allocate(struct ion_carveout_heap *heap,
unsigned long size)
{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
- unsigned long offset = gen_pool_alloc(carveout_heap->pool,
- ALIGN(size, carveout_heap->alloc_align));
+ unsigned long offset = gen_pool_alloc(heap->pool, size);
if (!offset)
return ION_CARVEOUT_ALLOCATE_FAIL;
unsigned long size,
unsigned long flags)
{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
struct sg_table *table;
+ unsigned long aligned_size = ALIGN(size, carveout_heap->alloc_align);
phys_addr_t paddr;
int ret;
+ if (carveout_heap->untouchable && !(flags & ION_FLAG_PROTECTED)) {
+ pr_err("%s: ION_FLAG_PROTECTED needed by untouchable heap %s\n",
+ __func__, heap->name);
+ return -EACCES;
+ }
+
table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
if (ret)
goto err_free;
- paddr = ion_carveout_allocate(heap, size);
+ paddr = ion_carveout_allocate(carveout_heap, aligned_size);
if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
ret = -ENOMEM;
goto err_free_table;
sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
buffer->sg_table = table;
- return 0;
+ if (carveout_heap->secure && (flags & ION_FLAG_PROTECTED)) {
+ buffer->priv_virt = ion_buffer_protect_single(
+ carveout_heap->protection_id,
+ (unsigned int)aligned_size,
+ paddr,
+ carveout_heap->alloc_align);
+ if (IS_ERR(buffer->priv_virt)) {
+ ret = PTR_ERR(buffer->priv_virt);
+ goto err_prot;
+ }
+ }
+ return 0;
+err_prot:
+ /* return the region to the carveout pool if protection failed */
+ gen_pool_free(carveout_heap->pool, paddr, aligned_size);
err_free_table:
sg_free_table(table);
err_free:
struct page *page = sg_page(table->sgl);
phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+ if (carveout_heap->secure && (buffer->flags & ION_FLAG_PROTECTED))
+ ion_buffer_unprotect(buffer->priv_virt);
+
if (!carveout_heap->untouchable) {
ion_heap_buffer_zero(buffer);
/* the free pages in carveout pool should be cache cold */
}
carveout_heap->size = heap_data->size;
carveout_heap->alloc_align = heap_data->align;
+ carveout_heap->protection_id = heap_data->id;
+ carveout_heap->secure = heap_data->secure;
carveout_heap->untouchable = heap_data->untouchable;
return &carveout_heap->heap;
#include <asm/cacheflush.h>
#include "ion.h"
+#include "ion_exynos.h"
struct ion_cma_heap {
struct ion_heap heap;
struct cma *cma;
unsigned int align_order;
+ unsigned int protection_id;
+ bool secure;
};
#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
unsigned long align = cma_heap->align_order;
bool cacheflush = !(flags & ION_FLAG_CACHED) ||
((flags & ION_FLAG_SYNC_FORCE) != 0);
+ bool protected = cma_heap->secure && (flags & ION_FLAG_PROTECTED);
int ret = -ENOMEM;
pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
sg_set_page(table->sgl, pages, size, 0);
- buffer->priv_virt = pages;
buffer->sg_table = table;
- if (cacheflush)
+ if (cacheflush || protected)
__flush_dcache_area(page_to_virt(pages), len);
- return 0;
+ if (protected) {
+ buffer->priv_virt = ion_buffer_protect_single(
+ cma_heap->protection_id,
+ (unsigned int)len,
+ page_to_phys(pages),
+ PAGE_SIZE << cma_heap->align_order);
+ if (IS_ERR(buffer->priv_virt)) {
+ ret = PTR_ERR(buffer->priv_virt);
+ goto err_prot;
+ }
+ }
+ return 0;
+err_prot:
+ sg_free_table(buffer->sg_table);
free_mem:
kfree(table);
err:
static void ion_cma_free(struct ion_buffer *buffer)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct page *pages = buffer->priv_virt;
unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+ bool protected = cma_heap->secure &&
+ (buffer->flags & ION_FLAG_PROTECTED);
+ if (protected)
+ ion_buffer_unprotect(buffer->priv_virt);
/* release memory */
- cma_release(cma_heap->cma, pages, nr_pages);
+ cma_release(cma_heap->cma, sg_page(buffer->sg_table->sgl), nr_pages);
/* release sg table */
sg_free_table(buffer->sg_table);
kfree(buffer->sg_table);
cma_heap->heap.name = kstrndup(heap_data->name,
MAX_HEAP_NAME - 1, GFP_KERNEL);
cma_heap->align_order = get_order(heap_data->align);
+ cma_heap->secure = heap_data->secure;
+ cma_heap->protection_id = heap_data->id;
return &cma_heap->heap;
}
struct ion_heap;
struct ion_platform_heap;
+/**
+ * struct ion_buffer_prot_info - buffer protection information
+ * @chunk_count: number of physically contiguous memory chunks to protect;
+ * every chunk must have the same size.
+ * @dma_addr: device virtual address used for protected memory access
+ * @flags: protection flags; currently this carries the protection ID
+ * @chunk_size: length in bytes of each chunk
+ * @bus_address: if @chunk_count is 1, the physical address of the chunk.
+ * If @chunk_count > 1, the physical address of an array of
+ * @chunk_count unsigned long elements that holds the physical
+ * address of each chunk.
+ */
+struct ion_buffer_prot_info {
+ unsigned int chunk_count;
+ unsigned int dma_addr;
+ unsigned int flags;
+ unsigned int chunk_size;
+ unsigned long bus_address;
+};
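+
+/*
+ * Illustrative sketch only: how a multi-chunk descriptor might be filled.
+ * ion_buffer_protect_single() below covers just the single-chunk case, so
+ * the chunk_phys[] array and the 1 MiB chunk size here are assumptions made
+ * for the example, not part of the driver interface.
+ *
+ *	unsigned long chunk_phys[4];      // physical address of each chunk
+ *	struct ion_buffer_prot_info prot = {
+ *		.chunk_count = 4,
+ *		.chunk_size  = SZ_1M,     // every chunk must have this size
+ *		.flags       = protection_id,
+ *		.bus_address = virt_to_phys(chunk_phys),
+ *	};
+ *
+ *	// prot.dma_addr is filled by the protection code once the secure
+ *	// IOVA has been allocated (see ion_secure_protect()).
+ */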
+
#ifdef CONFIG_ION_CARVEOUT_HEAP
extern struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
#else
#define ion_cma_heap_create(cma, p) ERR_PTR(-ENODEV)
#endif
+#if defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) && defined(CONFIG_ION_EXYNOS)
+int __init ion_secure_iova_pool_create(void);
+#else
+static inline int ion_secure_iova_pool_create(void)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_ION_EXYNOS
+void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
+ unsigned long phys, unsigned int protalign);
+void ion_buffer_unprotect(void *priv);
+#else
+static inline void *ion_buffer_protect_single(unsigned int protection_id,
+ unsigned int size,
+ unsigned long phys,
+ unsigned int protalign)
+{
+ return NULL;
+}
+#define ion_buffer_unprotect(priv) do { } while (0)
+#endif
+
#endif /* _ION_EXYNOS_H_ */
phys_addr_t base;
phys_addr_t size;
unsigned int alloc_align;
+ unsigned int protection_id;
+ bool secure;
bool untouchable;
} ion_reserved_mem[ION_NUM_HEAP_IDS - 1] __initdata;
static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem)
{
- bool untch, reusable;
+ bool untch, reusable, secure;
size_t alloc_align = PAGE_SIZE;
char *heapname;
const __be32 *prop;
+ __u32 protection_id = 0;
int len;
reusable = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,reusable", NULL);
untch = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,untouchable", NULL);
+ secure = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,secure", NULL);
+
+ prop = of_get_flat_dt_prop(rmem->fdt_node, "ion,protection_id", &len);
+ if (prop)
+ protection_id = be32_to_cpu(prop[0]);
prop = of_get_flat_dt_prop(rmem->fdt_node, "ion,alignment", &len);
if (prop && (be32_to_cpu(prop[0]) >= PAGE_SIZE)) {
ion_reserved_mem[reserved_mem_count].size = rmem->size;
ion_reserved_mem[reserved_mem_count].heapname = heapname;
ion_reserved_mem[reserved_mem_count].alloc_align = alloc_align;
+ ion_reserved_mem[reserved_mem_count].protection_id = protection_id;
+ ion_reserved_mem[reserved_mem_count].secure = secure;
ion_reserved_mem[reserved_mem_count].untouchable = untch;
reserved_mem_count++;
static int __init exynos_ion_register_heaps(void)
{
unsigned int i;
+ bool secure = false;
for (i = 0; i < reserved_mem_count; i++) {
struct ion_platform_heap pheap;
struct ion_heap *heap;
pheap.name = ion_reserved_mem[i].heapname;
+ pheap.id = ion_reserved_mem[i].protection_id;
pheap.base = ion_reserved_mem[i].base;
pheap.size = ion_reserved_mem[i].size;
pheap.align = ion_reserved_mem[i].alloc_align;
+ pheap.secure = ion_reserved_mem[i].secure;
pheap.untouchable = ion_reserved_mem[i].untouchable;
if (ion_reserved_mem[i].cma) {
ion_device_add_heap(heap);
pr_info("ION: registered '%s' heap\n", pheap.name);
+
+ if (pheap.secure)
+ secure = true;
}
+ /*
+ * ion_secure_iova_pool_create() should succeed. If it fails, the cause is
+ * either a design flaw or an out-of-memory condition, and there is nothing
+ * to do here beyond debugging: buffer protection is simply disabled when
+ * the pool cannot be created.
+ */
+ if (secure)
+ ion_secure_iova_pool_create();
+
return 0;
}
device_initcall(exynos_ion_register_heaps);
* overhead. Mapping to userspace is not allowed.
*/
#define ION_FLAG_NOZEROED 8
+/*
+ * the allocated buffer cannot be accessed without the proper permission:
+ * both mmap() and dma-buf kmap/vmap fail on it, and access through any
+ * other mapping raises a data abort exception and results in an oops.
+ * ION_FLAG_PROTECTED is only applicable to heaps with the secure property;
+ * other heaps ignore this flag.
+ */
+#define ION_FLAG_PROTECTED 16
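+/*
+ * Illustrative sketch only (not part of this header): allocating a protected
+ * buffer from user space, assuming the post-4.12 ION ABI in which
+ * ION_IOC_ALLOC returns a dma-buf fd. The secure heap id below is an
+ * assumption; it depends on the heaps registered from the device tree.
+ *
+ *	struct ion_allocation_data data = {
+ *		.len          = buffer_len,
+ *		.heap_id_mask = 1u << secure_heap_id,
+ *		.flags        = ION_FLAG_PROTECTED,
+ *	};
+ *
+ *	if (ioctl(ion_fd, ION_IOC_ALLOC, &data) == 0)
+ *		dmabuf_fd = data.fd;   // mmap() of this fd fails with EACCES
+ */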
/*
* the allocated buffer does not have dirty cache line allocated. In other
* words, ION flushes the cache even though allocation flags includes