android: ion: add buffer protection
authorCho KyongHo <pullip.cho@samsung.com>
Sun, 11 Feb 2018 08:08:35 +0000 (17:08 +0900)
committerSangwook Ju <sw.ju@samsung.com>
Mon, 14 May 2018 10:45:23 +0000 (19:45 +0900)
DRM video content requires that the decrypted video stream be protected
from being copied to an insecure buffer. For this protection, Exynos
SoCs provide H/W based buffer protection. Since the H/W has limited
resources, it needs to know the type of protection for each buffer, and
it imposes special alignment restrictions and limits on the memory pool
that serves buffer protection.

Every memory pool that needs buffer protection should be specified in
the flattened device tree under the 'reserved-memory' node, like CMA
heaps and carveout heaps. Heaps that need protection should have the
'ion,secure' boolean property, which indicates that buffers from the
heap may need buffer protection. They also need the 'ion,protection_id'
property, which specifies the type of buffer protection.
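
For illustration only, a protection-enabled reserved memory node could
look like the sketch below. The node name, label, region address/size
and the protection id value are placeholders, and the property that
carries the heap name is omitted; only the 'exynos9820-ion' compatible
and the 'ion,secure', 'ion,protection_id' and 'ion,alignment' properties
are the bindings handled by ion_fdt_exynos.c ('ion,alignment' is
honoured only when it is at least PAGE_SIZE):

  reserved-memory {
      #address-cells = <2>;
      #size-cells = <1>;
      ranges;

      vstream_heap: vstream@96000000 {
          compatible = "exynos9820-ion";
          reg = <0x0 0x96000000 0x4000000>;
          ion,secure;
          ion,protection_id = <2>;
          ion,alignment = <0x10000>;
      };
  };
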
Buffers from the secure heaps are not protected unless users pass the
ION_FLAG_PROTECTED flag. Non-secure heaps simply ignore the flag.
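
How the flag is requested is sketched below for illustration only. The
sketch assumes the post-4.12 staging ION UAPI (struct ion_allocation_data
and ION_IOC_ALLOC) exported to userspace as <linux/ion.h>, and a heap id
that would normally be discovered with ION_IOC_HEAP_QUERY:

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/ion.h>          /* ION_IOC_ALLOC, ION_FLAG_PROTECTED */

  /* Allocate a protected buffer and return its dma-buf fd, or -1 on error. */
  static int alloc_protected(unsigned int heap_id, unsigned long len)
  {
          struct ion_allocation_data data = {
                  .len = len,
                  .heap_id_mask = 1u << heap_id,
                  .flags = ION_FLAG_PROTECTED,
          };
          int ion_fd = open("/dev/ion", O_RDWR);
          int buf_fd = -1;

          if (ion_fd < 0)
                  return -1;
          if (ioctl(ion_fd, ION_IOC_ALLOC, &data) == 0)
                  buf_fd = data.fd; /* mmap() of this fd fails with EACCES */
          close(ion_fd);
          return buf_fd;
  }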

Change-Id: Ibef18c3fde7d628c2298abb95e71379c67cf7471
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
drivers/staging/android/ion/Makefile
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion.h
drivers/staging/android/ion/ion_buffer_protect.c [new file with mode: 0644]
drivers/staging/android/ion/ion_carveout_heap.c
drivers/staging/android/ion/ion_cma_heap.c
drivers/staging/android/ion/ion_exynos.h
drivers/staging/android/ion/ion_fdt_exynos.c
drivers/staging/android/uapi/ion.h

index 7b46caaab30bd00aaa9df4b2ba4d30c73f4175f2..9a8c095bd5e407464280cdc87b0663d3b99afb09 100644 (file)
@@ -5,4 +5,4 @@ obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
 obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
 obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
 obj-$(CONFIG_ION_TEST) += ion_test.o
-obj-$(CONFIG_ION_EXYNOS) += ion_fdt_exynos.o
+obj-$(CONFIG_ION_EXYNOS) += ion_fdt_exynos.o ion_buffer_protect.o
index 79522c7735ad868ae570077222354bcbd1ec53af..298edb9ffc455bfc99253a0ccd3365d7d27d451e 100644 (file)
@@ -301,6 +301,12 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
                return -EACCES;
        }
 
+       if ((buffer->flags & ION_FLAG_PROTECTED) != 0) {
+               pr_err("%s: mmap() to protected buffer is not allowed\n",
+                      __func__);
+               return -EACCES;
+       }
+
        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
index 69f3ee43384ae908de80011fa4b708d5f28d42f1..ebbcb80521c6efbbfa75c54d770d0642852c8144 100644 (file)
@@ -50,6 +50,7 @@ struct ion_platform_heap {
        phys_addr_t base;
        size_t size;
        phys_addr_t align;
+       bool secure;
        bool untouchable;
 };
 
diff --git a/drivers/staging/android/ion/ion_buffer_protect.c b/drivers/staging/android/ion/ion_buffer_protect.c
new file mode 100644 (file)
index 0000000..30bce3f
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * drivers/staging/android/ion/ion_buffer_protect.c
+ *
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/smc.h>	/* exynos_smc() and SMC_DRM_PPMP_* in the Exynos tree */
+
+#include <asm/cacheflush.h>
+
+#include "ion_exynos.h"
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+
+#define ION_SECURE_DMA_BASE    0x80000000
+#define ION_SECURE_DMA_END     0xE0000000
+
+static struct gen_pool *secure_iova_pool;
+static DEFINE_SPINLOCK(siova_pool_lock);
+
+static int ion_secure_iova_alloc(unsigned long *addr, unsigned long size,
+                                unsigned int align)
+{
+       unsigned long out_addr;
+
+       if (!secure_iova_pool) {
+               pr_err("%s: Secure IOVA pool is not created\n", __func__);
+               return -ENODEV;
+       }
+
+       spin_lock(&siova_pool_lock);
+       if (align > PAGE_SIZE) {
+               gen_pool_set_algo(secure_iova_pool,
+                                 find_first_fit_with_align, &align);
+               out_addr = gen_pool_alloc(secure_iova_pool, size);
+               gen_pool_set_algo(secure_iova_pool, NULL, NULL);
+       } else {
+               out_addr = gen_pool_alloc(secure_iova_pool, size);
+       }
+       spin_unlock(&siova_pool_lock);
+
+       if (out_addr == 0) {
+               pr_err("%s: failed alloc secure iova. %zu/%zu bytes used\n",
+                      __func__, gen_pool_avail(secure_iova_pool),
+                      gen_pool_size(secure_iova_pool));
+               return -ENOMEM;
+       }
+
+       *addr = out_addr;
+
+       return 0;
+}
+
+void ion_secure_iova_free(unsigned long addr, unsigned long size)
+{
+       if (!secure_iova_pool) {
+               pr_err("%s: Secure IOVA pool is not created\n", __func__);
+               return;
+       }
+
+       spin_lock(&siova_pool_lock);
+       gen_pool_free(secure_iova_pool, addr, size);
+       spin_unlock(&siova_pool_lock);
+}
+
+int __init ion_secure_iova_pool_create(void)
+{
+       secure_iova_pool = gen_pool_create(PAGE_SHIFT, -1);
+       if (!secure_iova_pool) {
+               pr_err("%s: failed to create Secure IOVA pool\n", __func__);
+               return -ENOMEM;
+       }
+
+       if (gen_pool_add(secure_iova_pool, ION_SECURE_DMA_BASE,
+                        ION_SECURE_DMA_END - ION_SECURE_DMA_BASE, -1)) {
+               pr_err("%s: failed to set address range of Secure IOVA pool\n",
+                      __func__);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
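+/*
+ * Assign a secure DMA (IOVA) address to the buffer and ask the secure world,
+ * through an SMC call, to protect the physical chunks described by @protdesc.
+ */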
+static int ion_secure_protect(struct ion_buffer_prot_info *protdesc,
+                             unsigned int protalign)
+{
+       unsigned long size = protdesc->chunk_count * protdesc->chunk_size;
+       unsigned long dma_addr = 0;
+       drmdrv_result_t drmret = DRMDRV_OK;
+       int ret;
+
+       ret = ion_secure_iova_alloc(&dma_addr, size,
+                                   max_t(u32, protalign, PAGE_SIZE));
+       if (ret)
+               goto err_iova;
+
+       protdesc->dma_addr = (unsigned int)dma_addr;
+
+       __flush_dcache_area(protdesc, sizeof(*protdesc));
+       if (protdesc->chunk_count > 1)
+               __flush_dcache_area(phys_to_virt(protdesc->bus_address),
+                               sizeof(unsigned long) * protdesc->chunk_count);
+
+       drmret = exynos_smc(SMC_DRM_PPMP_PROT, virt_to_phys(protdesc), 0, 0);
+       if (drmret != DRMDRV_OK) {
+               ret = -EACCES;
+               goto err_smc;
+       }
+
+       return 0;
+err_smc:
+       ion_secure_iova_free(dma_addr, size);
+err_iova:
+       pr_err("%s: PROT:%d (err=%d,va=%#lx,len=%#lx,cnt=%u,flg=%u)\n",
+              __func__, SMC_DRM_PPMP_PROT, drmret, dma_addr, size,
+              protdesc->chunk_count, protdesc->flags);
+
+       return ret;
+}
+
+static int ion_secure_unprotect(struct ion_buffer_prot_info *protdesc)
+{
+       unsigned long size = protdesc->chunk_count * protdesc->chunk_size;
+       int ret;
+       /*
+        * No need to flush protdesc for unprotection because it is never
+        * modified after the buffer is protected.
+        */
+       ret = exynos_smc(SMC_DRM_PPMP_UNPROT, virt_to_phys(protdesc), 0, 0);
+
+       ion_secure_iova_free(protdesc->dma_addr, size);
+
+       if (ret != DRMDRV_OK) {
+               pr_err("%s: UNPROT:%d(err=%d,va=%#lx,len=%#lx,cnt=%u,flg=%u)\n",
+                      __func__, SMC_DRM_PPMP_UNPROT, ret, protdesc->dma_addr,
+                      size, protdesc->chunk_count, protdesc->flags);
+               return -EACCES;
+       }
+
+       return 0;
+}
+
+#else /* !CONFIG_EXYNOS_CONTENT_PATH_PROTECTION */
+
+static int ion_secure_protect(struct ion_buffer_prot_info *prot,
+                             unsigned int protalign)
+{
+       return -ENODEV;
+}
+
+static int ion_secure_unprotect(struct ion_buffer_prot_info *prot)
+{
+       return -ENODEV;
+}
+
+#endif /* CONFIG_EXYNOS_CONTENT_PATH_PROTECTION */
+
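+/*
+ * ion_buffer_protect_single - protect a single physically contiguous buffer
+ *
+ * Builds an ion_buffer_prot_info that describes the buffer and requests H/W
+ * protection for it. Returns the descriptor to be passed to
+ * ion_buffer_unprotect() later, an ERR_PTR() value on failure, or NULL when
+ * content path protection is not enabled in the kernel configuration.
+ */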
+void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
+                               unsigned long phys, unsigned int protalign)
+{
+       struct ion_buffer_prot_info *protdesc;
+       int ret;
+
+       if (!IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION))
+               return NULL;
+
+       protdesc = kmalloc(sizeof(*protdesc), GFP_KERNEL);
+       if (!protdesc)
+               return ERR_PTR(-ENOMEM);
+
+       protdesc->chunk_count = 1;
+       protdesc->flags = protection_id;
+       protdesc->chunk_size = size;
+       protdesc->bus_address = phys;
+
+       ret = ion_secure_protect(protdesc, protalign);
+       if (ret) {
+               kfree(protdesc);
+               return ERR_PTR(ret);
+       }
+
+       return protdesc;
+}
+
+void ion_buffer_unprotect(void *priv)
+{
+       struct ion_buffer_prot_info *protdesc = priv;
+
+       if (priv) {
+               ion_secure_unprotect(protdesc);
+               kfree(protdesc);
+       }
+}
index 41236d49b9063ba62d0dc2708546703f500ab84a..be9d66bab790bbcb13291f78e302a5758ad21501 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/cacheflush.h>
 
 #include "ion.h"
+#include "ion_exynos.h"
 
 #define ION_CARVEOUT_ALLOCATE_FAIL     -1
 
@@ -35,16 +36,15 @@ struct ion_carveout_heap {
        phys_addr_t base;
        size_t size;
        size_t alloc_align;
+       unsigned int protection_id;
+       bool secure;
        bool untouchable;
 };
 
-static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+static phys_addr_t ion_carveout_allocate(struct ion_carveout_heap *heap,
                                         unsigned long size)
 {
-       struct ion_carveout_heap *carveout_heap =
-               container_of(heap, struct ion_carveout_heap, heap);
-       unsigned long offset = gen_pool_alloc(carveout_heap->pool,
-                               ALIGN(size, carveout_heap->alloc_align));
+       unsigned long offset = gen_pool_alloc(heap->pool, size);
 
        if (!offset)
                return ION_CARVEOUT_ALLOCATE_FAIL;
@@ -66,10 +66,19 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
                                      unsigned long size,
                                      unsigned long flags)
 {
+       struct ion_carveout_heap *carveout_heap =
+               container_of(heap, struct ion_carveout_heap, heap);
        struct sg_table *table;
+       unsigned long aligned_size = ALIGN(size, carveout_heap->alloc_align);
        phys_addr_t paddr;
        int ret;
 
+       if (carveout_heap->untouchable && !(flags & ION_FLAG_PROTECTED)) {
+               pr_err("%s: ION_FLAG_PROTECTED needed by untouchable heap %s\n",
+                      __func__, heap->name);
+               return -EACCES;
+       }
+
        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
@@ -77,7 +86,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
        if (ret)
                goto err_free;
 
-       paddr = ion_carveout_allocate(heap, size);
+       paddr = ion_carveout_allocate(carveout_heap, aligned_size);
        if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
                ret = -ENOMEM;
                goto err_free_table;
@@ -86,8 +95,18 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
        sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
        buffer->sg_table = table;
 
-       return 0;
+       if (carveout_heap->secure && (flags & ION_FLAG_PROTECTED)) {
+               buffer->priv_virt = ion_buffer_protect_single(
+                                               carveout_heap->protection_id,
+                                               (unsigned int)aligned_size,
+                                               paddr,
+                                               carveout_heap->alloc_align);
+               if (IS_ERR(buffer->priv_virt)) {
+                       ret = PTR_ERR(buffer->priv_virt);
+                       goto err_prot;
+               }
+       }
 
+       return 0;
+err_prot:
+       /* release the area reserved by ion_carveout_allocate() above */
+       gen_pool_free(carveout_heap->pool, paddr, aligned_size);
 err_free_table:
        sg_free_table(table);
 err_free:
@@ -103,6 +122,9 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
        struct page *page = sg_page(table->sgl);
        phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
 
+       if (carveout_heap->secure && (buffer->flags & ION_FLAG_PROTECTED))
+               ion_buffer_unprotect(buffer->priv_virt);
+
        if (!carveout_heap->untouchable) {
                ion_heap_buffer_zero(buffer);
                /* the free pages in carveout pool should be cache cold */
@@ -191,6 +213,8 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
        }
        carveout_heap->size = heap_data->size;
        carveout_heap->alloc_align = heap_data->align;
+       carveout_heap->protection_id = heap_data->id;
+       carveout_heap->secure = heap_data->secure;
        carveout_heap->untouchable = heap_data->untouchable;
 
        return &carveout_heap->heap;
index 62c5b4b860e52734c564ed6d540c63b6b78a5b52..8b9e3a474be5ab16b4e596e684a35e7d65957eb1 100644 (file)
 #include <asm/cacheflush.h>
 
 #include "ion.h"
+#include "ion_exynos.h"
 
 struct ion_cma_heap {
        struct ion_heap heap;
        struct cma *cma;
        unsigned int align_order;
+       unsigned int protection_id;
+       bool secure;
 };
 
 #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
@@ -48,6 +51,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
        unsigned long align = cma_heap->align_order;
        bool cacheflush = !(flags & ION_FLAG_CACHED) ||
                          ((flags & ION_FLAG_SYNC_FORCE) != 0);
+       bool protected = cma_heap->secure && (flags & ION_FLAG_PROTECTED);
        int ret = -ENOMEM;
 
        pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
@@ -85,14 +89,24 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 
        sg_set_page(table->sgl, pages, size, 0);
 
-       buffer->priv_virt = pages;
        buffer->sg_table = table;
 
-       if (cacheflush)
+       if (cacheflush || protected)
                __flush_dcache_area(page_to_virt(pages), len);
 
-       return 0;
+       if (protected) {
+               buffer->priv_virt = ion_buffer_protect_single(
+                                       cma_heap->protection_id,
+                                       (unsigned int)len,
+                                       page_to_phys(pages),
+                                       PAGE_SIZE << cma_heap->align_order);
+               if (IS_ERR(buffer->priv_virt)) {
+                       ret = PTR_ERR(buffer->priv_virt);
+                       goto err_prot;
+               }
+       }
 
+       return 0;
+err_prot:
+       sg_free_table(buffer->sg_table);
 free_mem:
        kfree(table);
 err:
@@ -103,11 +117,14 @@ err:
 static void ion_cma_free(struct ion_buffer *buffer)
 {
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-       struct page *pages = buffer->priv_virt;
        unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+       bool protected = cma_heap->secure &&
+                        (buffer->flags & ION_FLAG_PROTECTED);
 
+       if (protected)
+               ion_buffer_unprotect(buffer->priv_virt);
        /* release memory */
-       cma_release(cma_heap->cma, pages, nr_pages);
+       cma_release(cma_heap->cma, sg_page(buffer->sg_table->sgl), nr_pages);
        /* release sg table */
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
@@ -137,6 +154,8 @@ struct ion_heap *ion_cma_heap_create(struct cma *cma,
        cma_heap->heap.name = kstrndup(heap_data->name,
                                       MAX_HEAP_NAME - 1, GFP_KERNEL);
        cma_heap->align_order = get_order(heap_data->align);
+       cma_heap->secure = heap_data->secure;
+       cma_heap->protection_id = heap_data->id;
 
        return &cma_heap->heap;
 }
index c9c07ee8506aeaa03f37fbc2235f3d26a238a7cd..d2ee7620619e4fbd1e2502d4a54618066e23dbb2 100644 (file)
@@ -21,6 +21,26 @@ struct cma;
 struct ion_heap;
 struct ion_platform_heap;
 
+/**
+ * struct ion_buffer_prot_info - buffer protection information
+ * @chunk_count: number of physically contiguous memory chunks to protect.
+ *               Each chunk should have the same size.
+ * @dma_addr:    device virtual address for protected memory access
+ * @flags:       protection flags; this actually carries the protection_id
+ * @chunk_size:  length in bytes of each chunk.
+ * @bus_address: if @chunk_count is 1, this is the physical address of the
+ *               chunk. If @chunk_count > 1, this is the physical address of
+ *               an unsigned long array with @chunk_count elements that holds
+ *               the physical address of each chunk.
+ */
+struct ion_buffer_prot_info {
+       unsigned int chunk_count;
+       unsigned int dma_addr;
+       unsigned int flags;
+       unsigned int chunk_size;
+       unsigned long bus_address;
+};
+
 #ifdef CONFIG_ION_CARVEOUT_HEAP
 extern struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
 #else
@@ -34,4 +54,28 @@ extern struct ion_heap *ion_cma_heap_create(struct cma *cma,
 #define ion_cma_heap_create(cma, p) ERR_PTR(-ENODEV)
 #endif
 
+#if defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) && defined(CONFIG_ION_EXYNOS)
+int __init ion_secure_iova_pool_create(void);
+#else
+static inline int ion_secure_iova_pool_create(void)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_ION_EXYNOS
+void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
+                               unsigned long phys, unsigned int protalign);
+void ion_buffer_unprotect(void *priv);
+#else
+static inline void *ion_buffer_protect_single(unsigned int protection_id,
+                                             unsigned int size,
+                                             unsigned long phys,
+                                             unsigned int protalign)
+{
+       return NULL;
+}
+#define ion_buffer_unprotect(priv) do { } while (0)
+#endif
+
 #endif /* _ION_EXYNOS_H_ */
index f38ac58ca441551b37491ca0f9b1c12b3e4a9523..1a64e9bb3493148b0d3083d635d388fd788a382d 100644 (file)
@@ -29,19 +29,27 @@ struct ion_reserved_mem_struct {
        phys_addr_t     base;
        phys_addr_t     size;
        unsigned int    alloc_align;
+       unsigned int    protection_id;
+       bool            secure;
        bool            untouchable;
 } ion_reserved_mem[ION_NUM_HEAP_IDS - 1] __initdata;
 
 static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem)
 {
-       bool untch, reusable;
+       bool untch, reusable, secure;
        size_t alloc_align = PAGE_SIZE;
        char *heapname;
        const __be32 *prop;
+       __u32 protection_id = 0;
        int len;
 
        reusable = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,reusable", NULL);
        untch = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,untouchable", NULL);
+       secure = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,secure", NULL);
+
+       prop = of_get_flat_dt_prop(rmem->fdt_node, "ion,protection_id", &len);
+       if (prop)
+               protection_id = be32_to_cpu(prop[0]);
 
        prop = of_get_flat_dt_prop(rmem->fdt_node, "ion,alignment", &len);
        if (prop && (be32_to_cpu(prop[0]) >= PAGE_SIZE)) {
@@ -89,6 +97,8 @@ static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem)
        ion_reserved_mem[reserved_mem_count].size = rmem->size;
        ion_reserved_mem[reserved_mem_count].heapname = heapname;
        ion_reserved_mem[reserved_mem_count].alloc_align = alloc_align;
+       ion_reserved_mem[reserved_mem_count].protection_id = protection_id;
+       ion_reserved_mem[reserved_mem_count].secure = secure;
        ion_reserved_mem[reserved_mem_count].untouchable = untch;
        reserved_mem_count++;
 
@@ -100,15 +110,18 @@ RESERVEDMEM_OF_DECLARE(ion, "exynos9820-ion", exynos_ion_reserved_mem_setup);
 static int __init exynos_ion_register_heaps(void)
 {
        unsigned int i;
+       bool secure = false;
 
        for (i = 0; i < reserved_mem_count; i++) {
                struct ion_platform_heap pheap;
                struct ion_heap *heap;
 
                pheap.name        = ion_reserved_mem[i].heapname;
+               pheap.id          = ion_reserved_mem[i].protection_id;
                pheap.base        = ion_reserved_mem[i].base;
                pheap.size        = ion_reserved_mem[i].size;
                pheap.align       = ion_reserved_mem[i].alloc_align;
+               pheap.secure      = ion_reserved_mem[i].secure;
                pheap.untouchable = ion_reserved_mem[i].untouchable;
 
                if (ion_reserved_mem[i].cma) {
@@ -128,8 +141,20 @@ static int __init exynos_ion_register_heaps(void)
 
                ion_device_add_heap(heap);
                pr_info("ION: registered '%s' heap\n", pheap.name);
+
+               if (pheap.secure)
+                       secure = true;
        }
 
+       /*
+        * ion_secure_iova_pool_create() should succeed. If it fails, it is
+        * due to a design flaw or an out-of-memory condition, and there is
+        * nothing to do about the failure here other than debugging it. When
+        * the pool is not created, buffer protection is simply unavailable.
+        */
+       if (secure)
+               ion_secure_iova_pool_create();
+
        return 0;
 }
 device_initcall(exynos_ion_register_heaps);
index f6dece538655a6ebc66ff329a96d881d057a8de8..a009dec8ef8d21421a3baaa9153a4b72f876c08f 100644 (file)
@@ -61,6 +61,14 @@ enum ion_heap_type {
  * overhead. Mapping to userspace is not allowed.
  */
 #define ION_FLAG_NOZEROED 8
+/*
+ * the allocated buffer cannot be accessed without proper permission.
+ * Both mmap() and dma-buf kmap/vmap will fail. Accessing it through any
+ * other mapping generates a data abort exception and an oops.
+ * ION_FLAG_PROTECTED is only applicable to heaps with the security property.
+ * Other heaps ignore this flag.
+ */
+#define ION_FLAG_PROTECTED 16
 /*
  * the allocated buffer does not have dirty cache line allocated. In other
  * words, ION flushes the cache even though allocation flags includes