[RAMEN9610-12171] android: ion: cacheflush on entire protected area
author Cho KyongHo <pullip.cho@samsung.com>
Tue, 15 Jan 2019 12:28:21 +0000 (21:28 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:23:17 +0000 (20:23 +0300)
Pages from the CMA heap tend to be cached in the CPU caches in a dirty
state, since they are served as anonymous and page-cache pages to
userspace.
Flushing caches on buffer allocation is intended to prevent corruption
caused by dirty cache lines being written back to DRAM while the buffer
is updated by DMA. However, the cache flush must cover the entire
allocated area if the buffer is to be protected from non-secure access,
to prevent dirty write-backs into the protected area.
The same applies to the carveout heap, because allocation from a
carveout heap by an anonymous user is not prohibited.
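
For illustration, a minimal standalone sketch of the alignment
arithmetic the fix relies on (the 64 KiB protection granule is an
assumption for the example; the driver takes the real granule from the
heap's alloc_align):

    #include <stdio.h>

    /* ALIGN as in the kernel: round x up to a power-of-two boundary a */
    #define ALIGN(x, a)  (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long len = 12345;        /* requested buffer length */
        unsigned long granule = 0x10000;  /* assumed 64 KiB protection granule */

        /*
         * The protected region spans the whole aligned allocation, so the
         * cache flush must cover ALIGN(len, granule), not just len.
         */
        printf("flush %lu bytes for a %lu-byte buffer\n",
               ALIGN(len, granule), len);
        return 0;
    }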

Change-Id: Ieb1ae2efacefe1d23f03a4cc72615d45b4309eb7
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
drivers/staging/android/ion/ion_carveout_heap.c
drivers/staging/android/ion/ion_cma_heap.c

index d26a020df8568077b9127532476a4a74b7851ae0..71d65959a8cc6181f291a46dc48d42b917726a74 100644 (file)
@@ -131,21 +131,23 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
        struct sg_table *table = buffer->sg_table;
        struct page *page = sg_page(table->sgl);
        phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+       unsigned long size = buffer->size;
        int unprot_err = 0;
 
-       if (carveout_heap->secure && (buffer->flags & ION_FLAG_PROTECTED))
+       if (carveout_heap->secure && (buffer->flags & ION_FLAG_PROTECTED)) {
                unprot_err = ion_buffer_unprotect(buffer->priv_virt);
+               size = ALIGN(size, carveout_heap->alloc_align);
+       }
 
        if (!unprot_err) {
                if (!carveout_heap->untouchable) {
                        ion_heap_buffer_zero(buffer);
                        /* free pages in carveout pool should be cache cold */
                        if ((buffer->flags & ION_FLAG_CACHED) != 0)
-                               __flush_dcache_area(page_to_virt(page),
-                                                   buffer->size);
+                               __flush_dcache_area(page_to_virt(page), size);
                }
 
-               ion_carveout_free(carveout_heap, paddr, buffer->size);
+               ion_carveout_free(carveout_heap, paddr, size);
        }
        sg_free_table(table);
        kfree(table);
index dd96cb51592e64a2ead853dabc3549646949614d..0adbd43355bce330cd5bd1344ee65b6db2fd2d1b 100644 (file)
@@ -94,8 +94,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 
        buffer->sg_table = table;
 
+       /*
+        * No need to flush more than the required size. But clearing dirty
+        * data from the CPU caches should be performed on the entire area
+        * to be protected, because a write-back from the CPU caches with the
+        * non-secure attribute to the protected area results in a system error.
+        */
        if (cacheflush || protected)
-               __flush_dcache_area(page_to_virt(pages), len);
+               __flush_dcache_area(page_to_virt(pages),
+                                   nr_pages << PAGE_SHIFT);
 
        if (protected) {
                buffer->priv_virt = ion_buffer_protect_single(