/*
 * This confidential and proprietary software may be used only as
 * authorised by a licensing agreement from ARM Limited
 * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
 * ALL RIGHTS RESERVED
 * The entire notice above must be reproduced on all authorised
 * copies and copies may only be made to the extent permitted
 * by a licensing agreement from ARM Limited.
 */
/* needed to detect kernel version specific code */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else /* pre 2.6.26 the file was in the arch specific location */
#include <asm/semaphore.h>
#endif

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include "ump_kernel_common.h"
#include "ump_kernel_memory_backend.h"
31 typedef struct os_allocator
{
32 struct semaphore mutex
;
33 u32 num_pages_max
; /**< Maximum number of pages to allocate from the OS */
34 u32 num_pages_allocated
; /**< Number of pages allocated from the OS */
39 static void os_free(void *ctx
, ump_dd_mem
*descriptor
);
40 static int os_allocate(void *ctx
, ump_dd_mem
*descriptor
);
41 static void os_memory_backend_destroy(ump_memory_backend
*backend
);
42 static u32
os_stat(struct ump_memory_backend
*backend
);
47 * Create OS memory backend
49 ump_memory_backend
*ump_os_memory_backend_create(const int max_allocation
)
51 ump_memory_backend
*backend
;
54 info
= kmalloc(sizeof(os_allocator
), GFP_KERNEL
);
59 info
->num_pages_max
= max_allocation
>> PAGE_SHIFT
;
60 info
->num_pages_allocated
= 0;
62 sema_init(&info
->mutex
, 1);
64 backend
= kmalloc(sizeof(ump_memory_backend
), GFP_KERNEL
);
65 if (NULL
== backend
) {
71 backend
->allocate
= os_allocate
;
72 backend
->release
= os_free
;
73 backend
->shutdown
= os_memory_backend_destroy
;
74 backend
->stat
= os_stat
;
75 backend
->pre_allocate_physical_check
= NULL
;
76 backend
->adjust_to_mali_phys
= NULL
;
84 * Destroy specified OS memory backend
86 static void os_memory_backend_destroy(ump_memory_backend
*backend
)
88 os_allocator
*info
= (os_allocator
*)backend
->ctx
;
90 DBG_MSG_IF(1, 0 != info
->num_pages_allocated
, ("%d pages still in use during shutdown\n", info
->num_pages_allocated
));
101 static int os_allocate(void *ctx
, ump_dd_mem
*descriptor
)
105 int pages_allocated
= 0;
111 info
= (os_allocator
*)ctx
;
112 left
= descriptor
->size_bytes
;
113 is_cached
= descriptor
->is_cached
;
115 if (down_interruptible(&info
->mutex
)) {
116 DBG_MSG(1, ("Failed to get mutex in os_free\n"));
117 return 0; /* failure */
120 descriptor
->backend_info
= NULL
;
121 descriptor
->nr_blocks
= ((left
+ PAGE_SIZE
- 1) & ~(PAGE_SIZE
- 1)) >> PAGE_SHIFT
;
123 DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor
->nr_blocks
* sizeof(ump_dd_physical_block
)));
125 descriptor
->block_array
= (ump_dd_physical_block
*)vmalloc(sizeof(ump_dd_physical_block
) * descriptor
->nr_blocks
);
126 if (NULL
== descriptor
->block_array
) {
128 DBG_MSG(1, ("Block array could not be allocated\n"));
129 return 0; /* failure */
132 while (left
> 0 && ((info
->num_pages_allocated
+ pages_allocated
) < info
->num_pages_max
)) {
133 struct page
*new_page
;
136 new_page
= alloc_page(GFP_HIGHUSER
| __GFP_ZERO
| __GFP_REPEAT
| __GFP_NOWARN
);
138 new_page
= alloc_page(GFP_HIGHUSER
| __GFP_ZERO
| __GFP_REPEAT
| __GFP_NOWARN
| __GFP_COLD
);
140 if (NULL
== new_page
) {
144 /* Ensure page caches are flushed. */
146 descriptor
->block_array
[pages_allocated
].addr
= page_to_phys(new_page
);
147 descriptor
->block_array
[pages_allocated
].size
= PAGE_SIZE
;
149 descriptor
->block_array
[pages_allocated
].addr
= dma_map_page(NULL
, new_page
, 0, PAGE_SIZE
, DMA_BIDIRECTIONAL
);
150 descriptor
->block_array
[pages_allocated
].size
= PAGE_SIZE
;
153 DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor
->block_array
[pages_allocated
].addr
, is_cached
));
155 if (left
< PAGE_SIZE
) {
164 DBG_MSG(5, ("Alloce for ID:%2d got %d pages, cached: %d\n", descriptor
->secure_id
, pages_allocated
));
167 DBG_MSG(1, ("Failed to allocate needed pages\n"));
169 while (pages_allocated
) {
172 dma_unmap_page(NULL
, descriptor
->block_array
[pages_allocated
].addr
, PAGE_SIZE
, DMA_BIDIRECTIONAL
);
174 __free_page(pfn_to_page(descriptor
->block_array
[pages_allocated
].addr
>> PAGE_SHIFT
));
179 return 0; /* failure */
182 info
->num_pages_allocated
+= pages_allocated
;
184 DBG_MSG(6, ("%d out of %d pages now allocated\n", info
->num_pages_allocated
, info
->num_pages_max
));
188 return 1; /* success*/
193 * Free specified UMP memory
195 static void os_free(void *ctx
, ump_dd_mem
*descriptor
)
203 info
= (os_allocator
*)ctx
;
205 BUG_ON(descriptor
->nr_blocks
> info
->num_pages_allocated
);
207 if (down_interruptible(&info
->mutex
)) {
208 DBG_MSG(1, ("Failed to get mutex in os_free\n"));
212 DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor
->nr_blocks
));
214 info
->num_pages_allocated
-= descriptor
->nr_blocks
;
218 for (i
= 0; i
< descriptor
->nr_blocks
; i
++) {
219 DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor
->block_array
[i
].addr
));
220 if (! descriptor
->is_cached
) {
221 dma_unmap_page(NULL
, descriptor
->block_array
[i
].addr
, PAGE_SIZE
, DMA_BIDIRECTIONAL
);
223 __free_page(pfn_to_page(descriptor
->block_array
[i
].addr
>> PAGE_SHIFT
));
226 vfree(descriptor
->block_array
);
230 static u32
os_stat(struct ump_memory_backend
*backend
)
233 info
= (os_allocator
*)backend
->ctx
;
234 return info
->num_pages_allocated
* _MALI_OSK_MALI_PAGE_SIZE
;