From a7b98659cfed4b219fddfff4863b5e8476de2d9e Mon Sep 17 00:00:00 2001
From: Cho KyongHo
Date: Wed, 21 Mar 2018 22:38:58 +0900
Subject: [PATCH] android: ion: add buffer protection to HPA heap

A buffer allocated from the HPA heap consists of multiple chunks, so a
single base address is not enough to describe it to the secure world.
Introduce ion_buffer_protect_multi(), which builds a protection
descriptor from an array of chunk physical addresses. The page
descriptor array created during allocation is converted in place into
the physical address array to avoid an extra allocation, and it is
freed when the buffer is unprotected.

Change-Id: I922780c3378a0e9d6e152ba1a41cc7aaa5cae0f2
Signed-off-by: Cho KyongHo
---
 .../staging/android/ion/ion_buffer_protect.c | 42 +++++++++++++++++++
 drivers/staging/android/ion/ion_exynos.h     | 12 ++++++
 drivers/staging/android/ion/ion_hpa_heap.c   | 34 ++++++++++++++-
 3 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/drivers/staging/android/ion/ion_buffer_protect.c b/drivers/staging/android/ion/ion_buffer_protect.c
index 5b6363765177..e956ce203faa 100644
--- a/drivers/staging/android/ion/ion_buffer_protect.c
+++ b/drivers/staging/android/ion/ion_buffer_protect.c
@@ -189,12 +189,54 @@ void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
 	return protdesc;
 }
 
+void *ion_buffer_protect_multi(unsigned int protection_id, unsigned int count,
+			       unsigned int chunk_size, unsigned long *phys_arr,
+			       unsigned int protalign)
+{
+	struct ion_buffer_prot_info *protdesc;
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION))
+		return NULL;
+
+	if (count == 1)
+		return ion_buffer_protect_single(protection_id, chunk_size,
+						 *phys_arr, protalign);
+
+	protdesc = kmalloc(sizeof(*protdesc), GFP_KERNEL);
+	if (!protdesc)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * The array pointed to by phys_arr is stored in the protection
+	 * metadata after conversion to its physical address.
+	 */
+	kmemleak_ignore(phys_arr);
+
+	protdesc->chunk_count = count;
+	protdesc->flags = protection_id;
+	protdesc->chunk_size = chunk_size;
+	protdesc->bus_address = virt_to_phys(phys_arr);
+
+	ret = ion_secure_protect(protdesc, protalign);
+	if (ret) {
+		pr_err("%s: protection failure (id%u,len%u,count%u,align%#x)\n",
+		       __func__, protection_id, chunk_size, count, protalign);
+		kfree(protdesc);
+		return ERR_PTR(ret);
+	}
+
+	return protdesc;
+}
+
 void ion_buffer_unprotect(void *priv)
 {
 	struct ion_buffer_prot_info *protdesc = priv;
 
 	if (priv) {
 		ion_secure_unprotect(protdesc);
+		if (protdesc->chunk_count > 1)
+			kfree(phys_to_virt(protdesc->bus_address));
 		kfree(protdesc);
 	}
 }
diff --git a/drivers/staging/android/ion/ion_exynos.h b/drivers/staging/android/ion/ion_exynos.h
index fc9f5ef16b60..f6a8e87519c1 100644
--- a/drivers/staging/android/ion/ion_exynos.h
+++ b/drivers/staging/android/ion/ion_exynos.h
@@ -77,6 +77,9 @@ static inline int ion_secure_iova_pool_create(void)
 #ifdef CONFIG_ION_EXYNOS
 void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
 				unsigned long phys, unsigned int protalign);
+void *ion_buffer_protect_multi(unsigned int protection_id, unsigned int count,
+			       unsigned int chunk_size, unsigned long *phys_arr,
+			       unsigned int protalign);
 void ion_buffer_unprotect(void *priv);
 void exynos_ion_fixup(struct ion_device *idev);
 int exynos_ion_alloc_fixup(struct ion_device *idev, struct ion_buffer *buffer);
@@ -95,6 +98,15 @@ static inline void *ion_buffer_protect_single(unsigned int protection_id,
 {
 	return NULL;
 }
+
+static inline void *ion_buffer_protect_multi(unsigned int protection_id,
+					     unsigned int count,
+					     unsigned int chunk_size,
+					     unsigned long *phys_arr,
+					     unsigned int protalign)
+{
+	return NULL;
+}
 #define ion_buffer_unprotect(priv) do { } while (0)
 #define exynos_ion_fixup(idev) do { } while (0)
 static inline int exynos_ion_alloc_fixup(struct ion_device *idev,
diff --git a/drivers/staging/android/ion/ion_hpa_heap.c b/drivers/staging/android/ion/ion_hpa_heap.c
index 650a1ec1346f..ba1b2f0b15eb 100644
--- a/drivers/staging/android/ion/ion_hpa_heap.c
+++ b/drivers/staging/android/ion/ion_hpa_heap.c
@@ -23,6 +23,7 @@
 #include <...>
 
 #include "ion.h"
+#include "ion_exynos.h"
 
 #define ION_HPA_CHUNK_SIZE(heap) (PAGE_SIZE << (heap)->order)
 #define ION_HPA_PAGE_COUNT(len, heap) \
@@ -56,8 +57,10 @@ static int ion_hpa_allocate(struct ion_heap *heap,
 	bool zero = !(flags & ION_FLAG_NOZEROED);
 	bool cacheflush = !(flags & ION_FLAG_CACHED) ||
 			  ((flags & ION_FLAG_SYNC_FORCE) != 0);
+	bool protected = hpa_heap->secure && (flags & ION_FLAG_PROTECTED);
 	size_t desc_size = sizeof(struct page *) * count;
 	struct page **pages;
+	unsigned long *phys;
 	struct sg_table *sgt;
 	struct scatterlist *sg;
 	int ret, i;
@@ -88,6 +91,17 @@
 
 	sort(pages, count, sizeof(*pages), ion_hpa_compare_pages, NULL);
 
+	if (protected) {
+		cacheflush = true;
+		zero = false;
+	}
+
+	/*
+	 * Convert each page descriptor into its corresponding physical
+	 * address in place to avoid allocating a separate array.
+	 */
+	phys = (unsigned long *)pages;
+
 	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
 		if (zero)
 			memset(page_address(pages[i]), 0,
@@ -97,13 +111,27 @@
 			       ION_HPA_CHUNK_SIZE(hpa_heap));
 
 		sg_set_page(sg, pages[i], ION_HPA_CHUNK_SIZE(hpa_heap), 0);
+		phys[i] = page_to_phys(pages[i]);
 	}
 
-	kfree(pages);
+	if (protected) {
+		buffer->priv_virt = ion_buffer_protect_multi(
+					hpa_heap->protection_id,
+					(unsigned int)count,
+					ION_HPA_CHUNK_SIZE(hpa_heap),
+					phys, ION_HPA_CHUNK_SIZE(hpa_heap));
+		if (IS_ERR(buffer->priv_virt))
+			goto err_prot;
+	} else {
+		kfree(pages);
+	}
 
 	buffer->sg_table = sgt;
 
 	return 0;
+err_prot:
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+		__free_pages(sg_page(sg), hpa_heap->order);
 err_pages:
 	sg_free_table(sgt);
 err_sg:
@@ -117,10 +145,14 @@ err_sgt:
 static void ion_hpa_free(struct ion_buffer *buffer)
 {
 	struct ion_hpa_heap *hpa_heap = to_hpa_heap(buffer->heap);
+	bool protected = hpa_heap->secure &&
+			 (buffer->flags & ION_FLAG_PROTECTED);
 	struct sg_table *sgt = buffer->sg_table;
 	struct scatterlist *sg;
 	int i;
 
+	if (protected)
+		ion_buffer_unprotect(buffer->priv_virt);
 	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
 		__free_pages(sg_page(sg), hpa_heap->order);
 	sg_free_table(sgt);
-- 
2.20.1