From: Cho KyongHo
Date: Sun, 11 Feb 2018 08:08:35 +0000 (+0900)
Subject: android: ion: add buffer protection
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=d6061861f64f1e9ff821d1ddc054c50e64142f35;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

android: ion: add buffer protection

DRM video content requires that the decrypted video stream be
protected from being copied to an insecure buffer. For this purpose,
Exynos SoCs provide H/W based protection. Since the H/W has limited
resources, it needs to know the type of protection for each buffer,
imposes special alignment restrictions, and limits which memory pools
may serve protected buffers.

Every memory pool that needs buffer protection should be specified in
the flattened device tree under the 'reserved-memory' node, like CMA
heaps and carveout heaps. Heaps that need protection should have the
'ion,secure' boolean property, which indicates that buffers from the
heap may need protection. They also need 'ion,protection_id', which
specifies the type of buffer protection.

Buffers from secure heaps are not protected unless the user passes the
ION_FLAG_PROTECTED flag to ION. Non-secure heaps just ignore the flag.

Change-Id: Ibef18c3fde7d628c2298abb95e71379c67cf7471
Signed-off-by: Cho KyongHo
---

diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 7b46caaab30b..9a8c095bd5e4 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
 obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
 obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
 obj-$(CONFIG_ION_TEST) += ion_test.o
-obj-$(CONFIG_ION_EXYNOS) += ion_fdt_exynos.o
+obj-$(CONFIG_ION_EXYNOS) += ion_fdt_exynos.o ion_buffer_protect.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 79522c7735ad..298edb9ffc45 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -301,6 +301,12 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 		return -EACCES;
 	}
 
+	if ((buffer->flags & ION_FLAG_PROTECTED) != 0) {
+		pr_err("%s: mmap() to protected buffer is not allowed\n",
+		       __func__);
+		return -EACCES;
+	}
+
 	if (!(buffer->flags & ION_FLAG_CACHED))
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 69f3ee43384a..ebbcb80521c6 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -50,6 +50,7 @@ struct ion_platform_heap {
 	phys_addr_t base;
 	size_t size;
 	phys_addr_t align;
+	bool secure;
 	bool untouchable;
 };
 
diff --git a/drivers/staging/android/ion/ion_buffer_protect.c b/drivers/staging/android/ion/ion_buffer_protect.c
new file mode 100644
index 000000000000..30bce3f10a3a
--- /dev/null
+++ b/drivers/staging/android/ion/ion_buffer_protect.c
@@ -0,0 +1,198 @@
+/*
+ * drivers/staging/android/ion/ion_buffer_protect.c
+ *
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smc.h>
+
+#include <asm/cacheflush.h>
+
+#include "ion_exynos.h"
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+
+#define ION_SECURE_DMA_BASE	0x80000000
+#define ION_SECURE_DMA_END	0xE0000000
+
+static struct gen_pool *secure_iova_pool;
+static DEFINE_SPINLOCK(siova_pool_lock);
+
+static int ion_secure_iova_alloc(unsigned long *addr, unsigned long size,
+				 unsigned int align)
+{
+	if (!secure_iova_pool) {
+		pr_err("%s: Secure IOVA pool is not created\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock(&siova_pool_lock);
+	if (align > PAGE_SIZE) {
+		gen_pool_set_algo(secure_iova_pool,
+				  find_first_fit_with_align, &align);
+		*addr = gen_pool_alloc(secure_iova_pool, size);
+		gen_pool_set_algo(secure_iova_pool, NULL, NULL);
+	} else {
+		*addr = gen_pool_alloc(secure_iova_pool, size);
+	}
+	spin_unlock(&siova_pool_lock);
+
+	if (*addr == 0) {
+		pr_err("%s: failed to alloc secure iova. %zu/%zu bytes used\n",
+		       __func__, gen_pool_avail(secure_iova_pool),
+		       gen_pool_size(secure_iova_pool));
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void ion_secure_iova_free(unsigned long addr, unsigned long size)
+{
+	if (!secure_iova_pool) {
+		pr_err("%s: Secure IOVA pool is not created\n", __func__);
+		return;
+	}
+
+	spin_lock(&siova_pool_lock);
+	gen_pool_free(secure_iova_pool, addr, size);
+	spin_unlock(&siova_pool_lock);
+}
+
+int __init ion_secure_iova_pool_create(void)
+{
+	secure_iova_pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!secure_iova_pool) {
+		pr_err("%s: failed to create Secure IOVA pool\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (gen_pool_add(secure_iova_pool, ION_SECURE_DMA_BASE,
+			 ION_SECURE_DMA_END - ION_SECURE_DMA_BASE, -1)) {
+		pr_err("%s: failed to set address range of Secure IOVA pool\n",
+		       __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ion_secure_protect(struct ion_buffer_prot_info *protdesc,
+			      unsigned int protalign)
+{
+	unsigned long size = protdesc->chunk_count * protdesc->chunk_size;
+	unsigned long dma_addr = 0;
+	drmdrv_result_t drmret = DRMDRV_OK;
+	int ret;
+
+	ret = ion_secure_iova_alloc(&dma_addr, size,
+				    max_t(u32, protalign, PAGE_SIZE));
+	if (ret)
+		goto err_iova;
+
+	protdesc->dma_addr = (unsigned int)dma_addr;
+
+	__flush_dcache_area(protdesc, sizeof(*protdesc));
+	if (protdesc->chunk_count > 1)
+		__flush_dcache_area(phys_to_virt(protdesc->bus_address),
+				    sizeof(unsigned long) * protdesc->chunk_count);
+
+	drmret = exynos_smc(SMC_DRM_PPMP_PROT, virt_to_phys(protdesc), 0, 0);
+	if (drmret != DRMDRV_OK) {
+		ret = -EACCES;
+		goto err_smc;
+	}
+
+	return 0;
+err_smc:
+	ion_secure_iova_free(dma_addr, size);
+err_iova:
+	pr_err("%s: PROT:%d (err=%d,va=%#lx,len=%#lx,cnt=%u,flg=%u)\n",
+	       __func__, SMC_DRM_PPMP_PROT, drmret, dma_addr, size,
+	       protdesc->chunk_count, protdesc->flags);
+
+	return ret;
+}
+
+static int ion_secure_unprotect(struct ion_buffer_prot_info *protdesc)
+{
+	unsigned long size = protdesc->chunk_count * protdesc->chunk_size;
+	int ret;
+	/*
+	 * No need to flush protdesc for unprotection because it has never
+	 * been modified since the buffer was protected.
+ */ + ret = exynos_smc(SMC_DRM_PPMP_UNPROT, virt_to_phys(protdesc), 0, 0); + + ion_secure_iova_free(info->prot_desc.dma_addr, size); + + if (ret != DRMDRV_OK) { + pr_err("%s: UNPROT:%d(err=%d,va=%#lx,len=%#lx,cnt=%u,flg=%u)\n", + __func__, SMC_DRM_PPMP_UNPROT, ret, protdesc->dma_addr, + size, protdesc->chunk_count, protdesc->flags); + return -EACCES; + } + + return 0; +} + +#else /* !CONFIG_EXYNOS_CONTENT_PATH_PROTECTION */ + +static int ion_secure_protect(struct ion_buffer_prot_info *prot, + unsigned int protalign) +{ + return -ENODEV; +} + +static int ion_secure_unprotect(struct ion_buffer_prot_info *prot) +{ + return -ENODEV; +} + +#endif /* CONFIG_EXYNOS_CONTENT_PATH_PROTECTION */ + +void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size, + unsigned long phys, unsigned int protalign) +{ + struct ion_buffer_prot_info *protdesc; + int ret; + + if (!IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)) + return NULL; + + protdesc = kmalloc(sizeof(*protdesc), GFP_KERNEL); + if (!protdesc) + return ERR_PTR(-ENOMEM); + + protdesc->chunk_count = 1, + protdesc->flags = protection_id; + protdesc->chunk_size = size; + protdesc->bus_address = phys; + + ret = ion_secure_protect(protdesc, protalign); + if (ret) { + kfree(protdesc); + return ERR_PTR(ret); + } + + return protdesc; +} + +void ion_buffer_unprotect(void *priv) +{ + struct ion_buffer_prot_info *protdesc = priv; + + if (priv) { + ion_secure_unprotect(protdesc); + kfree(protdesc); + } +} diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c index 41236d49b906..be9d66bab790 100644 --- a/drivers/staging/android/ion/ion_carveout_heap.c +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -26,6 +26,7 @@ #include #include "ion.h" +#include "ion_exynos.h" #define ION_CARVEOUT_ALLOCATE_FAIL -1 @@ -35,16 +36,15 @@ struct ion_carveout_heap { phys_addr_t base; size_t size; size_t alloc_align; + unsigned int protection_id; + bool secure; bool untouchable; }; -static phys_addr_t ion_carveout_allocate(struct ion_heap *heap, +static phys_addr_t ion_carveout_allocate(struct ion_carveout_heap *heap, unsigned long size) { - struct ion_carveout_heap *carveout_heap = - container_of(heap, struct ion_carveout_heap, heap); - unsigned long offset = gen_pool_alloc(carveout_heap->pool, - ALIGN(size, carveout_heap->alloc_align)); + unsigned long offset = gen_pool_alloc(heap->pool, size); if (!offset) return ION_CARVEOUT_ALLOCATE_FAIL; @@ -66,10 +66,19 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, unsigned long size, unsigned long flags) { + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); struct sg_table *table; + unsigned long aligned_size = ALIGN(size, carveout_heap->alloc_align); phys_addr_t paddr; int ret; + if (carveout_heap->untouchable && !(flags & ION_FLAG_PROTECTED)) { + pr_err("%s: ION_FLAG_PROTECTED needed by untouchable heap %s\n", + __func__, heap->name); + return -EACCES; + } + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; @@ -77,7 +86,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, if (ret) goto err_free; - paddr = ion_carveout_allocate(heap, size); + paddr = ion_carveout_allocate(carveout_heap, aligned_size); if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { ret = -ENOMEM; goto err_free_table; @@ -86,8 +95,18 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); buffer->sg_table = table; - 
-	return 0;
+	if (carveout_heap->secure && (flags & ION_FLAG_PROTECTED)) {
+		buffer->priv_virt = ion_buffer_protect_single(
+					carveout_heap->protection_id,
+					(unsigned int)aligned_size, paddr,
+					carveout_heap->alloc_align);
+		if (IS_ERR(buffer->priv_virt)) {
+			ret = PTR_ERR(buffer->priv_virt);
+			goto err_prot;
+		}
+	}
+
+	return 0;
+err_prot:
+	gen_pool_free(carveout_heap->pool, paddr, aligned_size);
 err_free_table:
 	sg_free_table(table);
 err_free:
@@ -103,6 +122,9 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
 	struct page *page = sg_page(table->sgl);
 	phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
 
+	if (carveout_heap->secure && (buffer->flags & ION_FLAG_PROTECTED))
+		ion_buffer_unprotect(buffer->priv_virt);
+
 	if (!carveout_heap->untouchable) {
 		ion_heap_buffer_zero(buffer);
 		/* the free pages in carveout pool should be cache cold */
@@ -191,6 +213,8 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
 	}
 	carveout_heap->size = heap_data->size;
 	carveout_heap->alloc_align = heap_data->align;
+	carveout_heap->protection_id = heap_data->id;
+	carveout_heap->secure = heap_data->secure;
 	carveout_heap->untouchable = heap_data->untouchable;
 
 	return &carveout_heap->heap;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 62c5b4b860e5..8b9e3a474be5 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -26,11 +26,14 @@
 #include
 
 #include "ion.h"
+#include "ion_exynos.h"
 
 struct ion_cma_heap {
 	struct ion_heap heap;
 	struct cma *cma;
 	unsigned int align_order;
+	unsigned int protection_id;
+	bool secure;
 };
 
 #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
@@ -48,6 +51,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 	unsigned long align = cma_heap->align_order;
 	bool cacheflush = !(flags & ION_FLAG_CACHED) ||
 			  ((flags & ION_FLAG_SYNC_FORCE) != 0);
+	bool protected = cma_heap->secure && (flags & ION_FLAG_PROTECTED);
 	int ret = -ENOMEM;
 
 	pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
@@ -85,14 +89,24 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 
 	sg_set_page(table->sgl, pages, size, 0);
 
-	buffer->priv_virt = pages;
 	buffer->sg_table = table;
 
-	if (cacheflush)
+	if (cacheflush || protected)
 		__flush_dcache_area(page_to_virt(pages), len);
 
-	return 0;
+	if (protected) {
+		buffer->priv_virt = ion_buffer_protect_single(
+					cma_heap->protection_id,
+					(unsigned int)len, page_to_phys(pages),
+					PAGE_SIZE << cma_heap->align_order);
+		if (IS_ERR(buffer->priv_virt)) {
+			ret = PTR_ERR(buffer->priv_virt);
+			goto err_prot;
+		}
+	}
 
+	return 0;
+err_prot:
+	sg_free_table(buffer->sg_table);
 free_mem:
 	kfree(table);
 err:
@@ -103,11 +117,14 @@ err:
 static void ion_cma_free(struct ion_buffer *buffer)
 {
 	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-	struct page *pages = buffer->priv_virt;
 	unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+	bool protected = cma_heap->secure &&
+			 (buffer->flags & ION_FLAG_PROTECTED);
 
+	if (protected)
+		ion_buffer_unprotect(buffer->priv_virt);
 	/* release memory */
-	cma_release(cma_heap->cma, pages, nr_pages);
+	cma_release(cma_heap->cma, sg_page(buffer->sg_table->sgl), nr_pages);
 	/* release sg table */
 	sg_free_table(buffer->sg_table);
 	kfree(buffer->sg_table);
@@ -137,6 +154,8 @@ struct ion_heap *ion_cma_heap_create(struct cma *cma,
 	cma_heap->heap.name = kstrndup(heap_data->name,
 				       MAX_HEAP_NAME - 1, GFP_KERNEL);
 	cma_heap->align_order = get_order(heap_data->align);
+	cma_heap->secure = heap_data->secure;
+	cma_heap->protection_id = heap_data->id;
 
 	return &cma_heap->heap;
 }
diff --git a/drivers/staging/android/ion/ion_exynos.h b/drivers/staging/android/ion/ion_exynos.h
index c9c07ee8506a..d2ee7620619e 100644
--- a/drivers/staging/android/ion/ion_exynos.h
+++ b/drivers/staging/android/ion/ion_exynos.h
@@ -21,6 +21,26 @@ struct cma;
 struct ion_heap;
 struct ion_platform_heap;
 
+/**
+ * struct ion_buffer_prot_info - buffer protection information
+ * @chunk_count: number of physically contiguous memory chunks to protect.
+ *               Every chunk has the same size.
+ * @dma_addr:    device virtual address for protected memory access
+ * @flags:       protection flags; currently this carries the protection_id
+ * @chunk_size:  length in bytes of each chunk
+ * @bus_address: if @chunk_count is 1, this is the physical address of the
+ *               chunk. If @chunk_count > 1, this is the physical address of
+ *               an array of unsigned long with @chunk_count elements, each
+ *               of which holds the physical address of one chunk.
+ */
+struct ion_buffer_prot_info {
+	unsigned int chunk_count;
+	unsigned int dma_addr;
+	unsigned int flags;
+	unsigned int chunk_size;
+	unsigned long bus_address;
+};
+
 #ifdef CONFIG_ION_CARVEOUT_HEAP
 extern struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
 #else
@@ -34,4 +54,28 @@ extern struct ion_heap *ion_cma_heap_create(struct cma *cma,
 #define ion_cma_heap_create(cma, p) ERR_PTR(-ENODEV)
 #endif
 
+#if defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) && defined(CONFIG_ION_EXYNOS)
+int __init ion_secure_iova_pool_create(void);
+#else
+static inline int ion_secure_iova_pool_create(void)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_ION_EXYNOS
+void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
+				unsigned long phys, unsigned int protalign);
+void ion_buffer_unprotect(void *priv);
+#else
+static inline void *ion_buffer_protect_single(unsigned int protection_id,
+					      unsigned int size,
+					      unsigned long phys,
+					      unsigned int protalign)
+{
+	return NULL;
+}
+#define ion_buffer_unprotect(priv) do { } while (0)
+#endif
+
 #endif /* _ION_EXYNOS_H_ */
diff --git a/drivers/staging/android/ion/ion_fdt_exynos.c b/drivers/staging/android/ion/ion_fdt_exynos.c
index f38ac58ca441..1a64e9bb3493 100644
--- a/drivers/staging/android/ion/ion_fdt_exynos.c
+++ b/drivers/staging/android/ion/ion_fdt_exynos.c
@@ -29,19 +29,27 @@ struct ion_reserved_mem_struct {
 	phys_addr_t base;
 	phys_addr_t size;
 	unsigned int alloc_align;
+	unsigned int protection_id;
+	bool secure;
 	bool untouchable;
 } ion_reserved_mem[ION_NUM_HEAP_IDS - 1] __initdata;
 
 static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem)
 {
-	bool untch, reusable;
+	bool untch, reusable, secure;
 	size_t alloc_align = PAGE_SIZE;
 	char *heapname;
 	const __be32 *prop;
+	__u32 protection_id = 0;
 	int len;
 
 	reusable = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,reusable", NULL);
 	untch = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,untouchable", NULL);
+	secure = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,secure", NULL);
+
+	prop = of_get_flat_dt_prop(rmem->fdt_node, "ion,protection_id", &len);
+	if (prop)
+		protection_id = be32_to_cpu(prop[0]);
 
 	prop = of_get_flat_dt_prop(rmem->fdt_node, "ion,alignment", &len);
 	if (prop && (be32_to_cpu(prop[0]) >= PAGE_SIZE)) {
@@ -89,6 +97,8 @@ static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem)
 	ion_reserved_mem[reserved_mem_count].size = rmem->size;
 	ion_reserved_mem[reserved_mem_count].heapname = heapname;
 	ion_reserved_mem[reserved_mem_count].alloc_align = alloc_align;
+	ion_reserved_mem[reserved_mem_count].protection_id = protection_id;
+	ion_reserved_mem[reserved_mem_count].secure = secure;
 	ion_reserved_mem[reserved_mem_count].untouchable = untch;
 
 	reserved_mem_count++;
@@ -100,15 +110,18 @@ RESERVEDMEM_OF_DECLARE(ion, "exynos9820-ion", exynos_ion_reserved_mem_setup);
 static int __init exynos_ion_register_heaps(void)
 {
 	unsigned int i;
+	bool secure = false;
 
 	for (i = 0; i < reserved_mem_count; i++) {
 		struct ion_platform_heap pheap;
 		struct ion_heap *heap;
 
 		pheap.name = ion_reserved_mem[i].heapname;
+		pheap.id = ion_reserved_mem[i].protection_id;
 		pheap.base = ion_reserved_mem[i].base;
 		pheap.size = ion_reserved_mem[i].size;
 		pheap.align = ion_reserved_mem[i].alloc_align;
+		pheap.secure = ion_reserved_mem[i].secure;
 		pheap.untouchable = ion_reserved_mem[i].untouchable;
 
 		if (ion_reserved_mem[i].cma) {
@@ -128,8 +141,20 @@ static int __init exynos_ion_register_heaps(void)
 		ion_device_add_heap(heap);
 
 		pr_info("ION: registered '%s' heap\n", pheap.name);
+
+		if (pheap.secure)
+			secure = true;
 	}
 
+	/*
+	 * ion_secure_iova_pool_create() should succeed. If it fails, the
+	 * cause is either a design flaw or an out-of-memory condition, and
+	 * nothing can be done about the failure here beyond debugging;
+	 * ion_secure_iova_pool_create() disables protection when it fails.
+	 */
+	if (secure)
+		ion_secure_iova_pool_create();
+
 	return 0;
 }
 device_initcall(exynos_ion_register_heaps);
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index f6dece538655..a009dec8ef8d 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -61,6 +61,14 @@ enum ion_heap_type {
  * overhead. Mapping to userspace is not allowed.
  */
 #define ION_FLAG_NOZEROED 8
+/*
+ * the allocated buffer cannot be accessed without proper permission: both
+ * mmap() and dma-buf kmap/vmap will fail. Accessing it through any other
+ * mapping generates a data abort exception and an oops.
+ * ION_FLAG_PROTECTED is only applicable to heaps with the security
+ * property; other heaps simply ignore this flag.
+ */
+#define ION_FLAG_PROTECTED 16
 /*
  * the allocated buffer does not have dirty cache line allocated. In other
  * words, ION flushes the cache even though allocation flags includes
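
---
Usage note (not part of the patch): the commit message describes the device
tree side of this feature. As a sketch, a reserved-memory node that opts a
heap into H/W protection could look like the following. The 'ion,secure',
'ion,protection_id' and 'ion,alignment' properties and the "exynos9820-ion"
compatible string come from this patch; the node name, label, region
address/size and the protection ID value are illustrative assumptions that
depend on the SoC and its secure firmware, and the property naming the heap
is parsed outside the hunks shown here, so it is omitted.

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <1>;
		ranges;

		/* hypothetical protected video heap */
		secure_heap: secure_heap@96000000 {
			compatible = "exynos9820-ion";
			reg = <0x0 0x96000000 0x2000000>; /* assumed region */
			ion,alignment = <0x10000>;        /* assumed */
			ion,secure;               /* may serve protected buffers */
			ion,protection_id = <2>;  /* assumed protection type */
		};
	};

Even from such a heap, a buffer is only protected when userspace also passes
ION_FLAG_PROTECTED in the flags of the ION allocation request; allocations
without the flag stay unprotected (except on untouchable heaps, which now
reject them with -EACCES), and non-secure heaps ignore the flag entirely.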