/*
 * This confidential and proprietary software may be used only as
 * authorised by a licensing agreement from ARM Limited
 * (C) COPYRIGHT 2013 ARM Limited
 * ALL RIGHTS RESERVED
 * The entire notice above must be reproduced on all authorised
 * copies and copies may only be made to the extent permitted
 * by a licensing agreement from ARM Limited.
 */
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include "mali_osk.h"
#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_kernel_linux.h"
/* Minimum size of allocator page pool */
#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
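/* Annotation: with the 4 KiB Mali/CPU page size assumed throughout this
 * file, 1 MiB corresponds to 256 pages, hence the "* 256" conversion from
 * the MB-based configuration value above. */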
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
#endif
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
#endif
static void mali_mem_os_trim_pool(struct work_struct *work);
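/* Annotation (design overview): mali_mem_os_allocator keeps a pool of
 * pages that have already been allocated, zeroed and DMA-mapped, so that
 * pages released by one Mali allocation can be recycled cheaply by the
 * next. The pool is drained by two mechanisms: the kernel's shrinker
 * callback (mali_mem_os_shrink) under memory pressure, and the deferrable
 * delayed work item (mali_mem_os_trim_pool) that trims the pool back
 * towards MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES once it has grown
 * past that watermark. */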
static struct mali_mem_os_allocator {
	spinlock_t pool_lock;
	struct list_head pool_pages;
	size_t pool_count;

	atomic_t allocated_pages;
	size_t allocation_limit;

	struct shrinker shrinker;
	struct delayed_work timed_shrinker;
	struct workqueue_struct *wq;
} mali_mem_os_allocator = {
	.pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
	.pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
	.pool_count = 0,

	.allocated_pages = ATOMIC_INIT(0),
	.allocation_limit = 0,

	.shrinker.shrink = mali_mem_os_shrink,
	.shrinker.seeks = DEFAULT_SEEKS,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
	.timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
#else
	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
#endif
};
static void mali_mem_os_free(mali_mem_allocation *descriptor)
{
	LIST_HEAD(pages);

	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

	atomic_sub(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);

	/* Put pages on pool. */
	list_cut_position(&pages, &descriptor->os_mem.pages, descriptor->os_mem.pages.prev);

	spin_lock(&mali_mem_os_allocator.pool_lock);

	list_splice(&pages, &mali_mem_os_allocator.pool_pages);
	mali_mem_os_allocator.pool_count += descriptor->os_mem.count;

	spin_unlock(&mali_mem_os_allocator.pool_lock);

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
	}
}
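/* Annotation: mali_mem_os_free does not return pages to the kernel. It
 * moves the whole allocation onto pool_pages with one
 * list_cut_position/list_splice pair (O(1), done under pool_lock) and
 * leaves the actual freeing to the trim timer armed above once the pool
 * exceeds its target size. */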
static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
{
	struct page *new_page, *tmp;
	LIST_HEAD(pages);
	size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
	size_t remaining = page_count;
	size_t pool_pages;
	u32 i;

	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

	INIT_LIST_HEAD(&descriptor->os_mem.pages);
	descriptor->os_mem.count = page_count;

	/* Grab pages from pool. */
	spin_lock(&mali_mem_os_allocator.pool_lock);
	pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
	for (i = pool_pages; i > 0; i--) {
		BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
		list_move(mali_mem_os_allocator.pool_pages.next, &pages);
	}
	mali_mem_os_allocator.pool_count -= pool_pages;
	remaining -= pool_pages;
	spin_unlock(&mali_mem_os_allocator.pool_lock);

	/* Process pages from pool. */
	list_for_each_entry_safe(new_page, tmp, &pages, lru) {
		BUG_ON(NULL == new_page);

		list_move_tail(&new_page->lru, &descriptor->os_mem.pages);
	}

	/* Allocate new pages, if needed. */
	for (i = 0; i < remaining; i++) {
		dma_addr_t dma_addr;

		new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);

		if (unlikely(NULL == new_page)) {
			/* Calculate the number of pages actually allocated, and free them. */
			descriptor->os_mem.count = (page_count - remaining) + i;
			atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
			mali_mem_os_free(descriptor);
			return -ENOMEM;
		}

		/* Ensure page is flushed from CPU caches. */
		dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
		                        0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

		/* Store page phys addr */
		SetPagePrivate(new_page);
		set_page_private(new_page, dma_addr);

		list_add_tail(&new_page->lru, &descriptor->os_mem.pages);
	}

	atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
	}

	return 0;
}
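/* Annotation on the allocation path above: pages are taken from the pool
 * first and only the remainder comes from alloc_page(). The GFP mask asks
 * for highmem-capable, pre-zeroed pages without allocation-failure
 * warnings; note that __GFP_REPEAT and __GFP_COLD only exist on the older
 * kernels this driver targets (later kernels renamed or removed them).
 * dma_map_page(..., DMA_TO_DEVICE) both produces the bus address the GPU
 * will use and cleans the CPU cache lines for the page on non-coherent
 * systems; the returned handle is stashed in page->private via
 * set_page_private() for later use by the Mali MMU mapping code. */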
static int mali_mem_os_mali_map(mali_mem_allocation *descriptor, struct mali_session_data *session)
{
	struct mali_page_directory *pagedir = session->page_directory;
	struct page *page;
	_mali_osk_errcode_t err;
	u32 virt = descriptor->mali_mapping.addr;
	u32 prop = descriptor->mali_mapping.properties;

	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

	err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != err) {
		return -ENOMEM;
	}

	list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
		u32 phys = page_private(page);
		mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
		virt += MALI_MMU_PAGE_SIZE;
	}

	return 0;
}
static void mali_mem_os_mali_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
{
	mali_mem_mali_map_free(descriptor);
}
static int mali_mem_os_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma)
{
	struct page *page;
	int ret;
	unsigned long addr = vma->vm_start;

	list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
		/* We should use vm_insert_page, but it does a dcache
		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
		ret = vm_insert_page(vma, addr, page);
		*/
		ret = vm_insert_pfn(vma, addr, page_to_pfn(page));

		if (unlikely(0 != ret)) {
			return -EFAULT;
		}

		addr += _MALI_OSK_MALI_PAGE_SIZE;
	}

	return 0;
}
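/* Annotation: vm_insert_pfn() maps the raw pfn without touching the CPU
 * dcache, which is why it is preferred over vm_insert_page() here. It
 * requires the vma to already be marked VM_PFNMAP (or VM_MIXEDMAP); that
 * flag is presumably set by the driver's mmap hook before this function
 * runs. */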
mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
{
	mali_mem_allocation *descriptor;
	int err;

	if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
		MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
		                     size,
		                     atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
		                     mali_mem_os_allocator.allocation_limit));
		return NULL;
	}

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_OS);
	if (NULL == descriptor) return NULL;

	descriptor->mali_mapping.addr = mali_addr;
	descriptor->size = size;
	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
	descriptor->cpu_mapping.ref = 1;

	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	} else {
		/* Cached Mali memory mapping */
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		vma->vm_flags |= VM_SHARED;
	}

	err = mali_mem_os_alloc_pages(descriptor, size); /* Allocate pages */
	if (0 != err) goto alloc_failed;

	/* Take session memory lock */
	_mali_osk_mutex_wait(session->memory_lock);

	err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
	if (0 != err) goto mali_map_failed;

	_mali_osk_mutex_signal(session->memory_lock);

	err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
	if (0 != err) goto cpu_map_failed;

	return descriptor;

cpu_map_failed:
	/* Re-take the session lock so the shared unwind path below stays
	 * balanced; it was released before the CPU mapping was attempted. */
	_mali_osk_mutex_wait(session->memory_lock);
	mali_mem_os_mali_unmap(session, descriptor);
mali_map_failed:
	_mali_osk_mutex_signal(session->memory_lock);
	mali_mem_os_free(descriptor);
alloc_failed:
	mali_mem_descriptor_destroy(descriptor);
	MALI_DEBUG_PRINT(2, ("OS allocator: Failed to allocate memory (%d)\n", err));

	return NULL;
}
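/* Annotation on the error path above: the labels unwind in strict reverse
 * order of construction. A CPU-map failure first tears down the Mali-side
 * mapping; a Mali-map failure releases the session memory lock and puts
 * the pages back on the pool; every failure finally destroys the
 * descriptor and returns NULL. The mutex re-acquire under cpu_map_failed
 * is an added annotation/fix for lock balance, not part of the original
 * vendor text. */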
void mali_mem_os_release(mali_mem_allocation *descriptor)
{
	struct mali_session_data *session = descriptor->session;

	/* Unmap the memory from the mali virtual address space. */
	mali_mem_os_mali_unmap(session, descriptor);

	mali_mem_os_free(descriptor);
}
#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
static struct {
	struct {
		u32 phys;
		mali_io_address mapping;
	} page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
	size_t count;
	spinlock_t lock;
} mali_mem_page_table_page_pool = {
	.count = 0,
	.lock = __SPIN_LOCK_UNLOCKED(pool_lock),
};
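/* Annotation: page table pages are not ordinary pool pages; they come from
 * dma_alloc_writecombine() so the CPU can update Mali page table entries
 * without explicit cache maintenance. This small fixed-size pool avoids
 * round trips to the DMA allocator as sessions come and go. */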
_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
{
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;

	spin_lock(&mali_mem_page_table_page_pool.lock);
	if (0 < mali_mem_page_table_page_pool.count) {
		u32 i = --mali_mem_page_table_page_pool.count;
		*phys = mali_mem_page_table_page_pool.page[i].phys;
		*mapping = mali_mem_page_table_page_pool.page[i].mapping;

		ret = _MALI_OSK_ERR_OK;
	}
	spin_unlock(&mali_mem_page_table_page_pool.lock);

	if (_MALI_OSK_ERR_OK != ret) {
		*mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
		if (NULL != *mapping) {
			ret = _MALI_OSK_ERR_OK;
		}
	}

	if (ret != _MALI_OSK_ERR_OK) {
		MALI_DEBUG_PRINT(2, ("os_get_table_page fail: ret=%d, *mapping=%x\n", ret, *mapping));
		MALI_DEBUG_PRINT(2, ("os_get_table_page fail: mali_mem_page_table_page_pool.count=%x\n", mali_mem_page_table_page_pool.count));
	}

	return ret;
}
void mali_mem_os_release_table_page(u32 phys, void *virt)
{
	spin_lock(&mali_mem_page_table_page_pool.lock);
	if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
		u32 i = mali_mem_page_table_page_pool.count;
		mali_mem_page_table_page_pool.page[i].phys = phys;
		mali_mem_page_table_page_pool.page[i].mapping = virt;

		++mali_mem_page_table_page_pool.count;

		spin_unlock(&mali_mem_page_table_page_pool.lock);
	} else {
		spin_unlock(&mali_mem_page_table_page_pool.lock);

		dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
	}
}
static void mali_mem_os_free_page(struct page *page)
{
	BUG_ON(page_count(page) != 1);

	dma_unmap_page(&mali_platform_device->dev, page_private(page),
	               _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

	ClearPagePrivate(page);

	__free_page(page);
}
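/* Annotation: page_private() returns the dma_addr_t stored by
 * mali_mem_os_alloc_pages(). The free and map paths both assume the handle
 * fits in 32 bits (the Mali MMU uses 32-bit descriptors); on a platform
 * where dma_addr_t can exceed 4 GiB that assumption would need
 * revisiting. */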
/* The maximum number of page table pool pages to free in one go. */
#define MALI_MEM_OS_CHUNK_TO_FREE 64UL

/* Free a certain number of pages from the page table page pool.
 * The pool lock must be held when calling the function, and the lock will be
 * released before returning.
 */
static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
{
	u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
	void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
	u32 i;

	MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);

	/* Remove nr_to_free pages from the pool and store them locally on stack. */
	for (i = 0; i < nr_to_free; i++) {
		u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;

		phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
		virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
	}

	mali_mem_page_table_page_pool.count -= nr_to_free;

	spin_unlock(&mali_mem_page_table_page_pool.lock);

	/* After releasing the spinlock: free the pages we removed from the pool. */
	for (i = 0; i < nr_to_free; i++) {
		dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
	}
}
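/* Annotation: copying the phys/virt pairs onto the stack first lets the
 * spinlock be dropped before the dma_free_writecombine() calls run, so the
 * potentially expensive DMA frees happen without the lock held.
 * MALI_MEM_OS_CHUNK_TO_FREE bounds the stack usage of the two arrays
 * above. */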
static void mali_mem_os_trim_page_table_page_pool(void)
{
	size_t nr_to_free = 0;
	size_t nr_to_keep;

	/* Keep 2 page table pages for each 1024 pages in the page cache. */
	nr_to_keep = mali_mem_os_allocator.pool_count / 512;
	/* And a minimum of eight pages, to accommodate new sessions. */
	nr_to_keep += 8;

	if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;

	if (nr_to_keep < mali_mem_page_table_page_pool.count) {
		nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
	}

	/* Pool lock will be released by the callee. */
	mali_mem_os_page_table_pool_free(nr_to_free);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
#endif
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
#endif
{
	struct page *page, *tmp;
	unsigned long flags;
	struct list_head *le, pages;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
	int nr = nr_to_scan;
#else
	int nr = sc->nr_to_scan;
#endif

	if (0 == nr) {
		//[BUGFIX]-Mod-BEGIN by SCDTABLET.zhangku.guo@tcl.com,06/09/2015,983716,mtk patch for fix ANR issue when TAT
		//return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
		return mali_mem_os_allocator.pool_count;
		//[BUGFIX]-Mod-END by SCDTABLET.zhangku.guo@tcl.com
	}

	if (0 == mali_mem_os_allocator.pool_count) {
		/* No pages available */
		return 0;
	}

	if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
		/* Not able to lock. */
		return -1;
	}

	/* Release from general page pool */
	nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
	mali_mem_os_allocator.pool_count -= nr;
	list_for_each(le, &mali_mem_os_allocator.pool_pages) {
		--nr;
		if (0 == nr) break;
	}
	list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
	spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);

	list_for_each_entry_safe(page, tmp, &pages, lru) {
		mali_mem_os_free_page(page);
	}

	//[BUGFIX]-Del-BEGIN by SCDTABLET.zhangku.guo@tcl.com,06/09/2015,983716,mtk patch for fix ANR issue when TAT
	/* Release some pages from page table page pool */
	//mali_mem_os_trim_page_table_page_pool();
	//[BUGFIX]-Del-END by SCDTABLET.zhangku.guo@tcl.com

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
		/* Pools are empty, stop timer */
		MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
	}

	//[BUGFIX]-Mod-BEGIN by SCDTABLET.zhangku.guo@tcl.com,06/09/2015,983716,mtk patch for fix ANR issue when TAT
	//return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
	return mali_mem_os_allocator.pool_count;
	//[BUGFIX]-Mod-END by SCDTABLET.zhangku.guo@tcl.com
}
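/* Annotation on the shrinker contract used above (the pre-3.12 API): when
 * the core calls the shrink hook with nr_to_scan == 0 it is only asking
 * how many objects are freeable, so the function returns the pool count
 * early; returning -1 tells the core the lock could not be taken and it
 * should retry later. The TCL/MTK [BUGFIX] markers record that the page
 * table page pool was deliberately removed from this accounting to avoid
 * an ANR (stall) under memory pressure. */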
static void mali_mem_os_trim_pool(struct work_struct *data)
{
	struct page *page, *tmp;
	struct list_head *le;
	LIST_HEAD(pages);
	size_t nr_to_free;

	MALI_IGNORE(data);

	MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));

	/* Release from general page pool */
	spin_lock(&mali_mem_os_allocator.pool_lock);
	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;

		/* Free half the pages on the pool above the static limit. Or 64 pages, 256KB. */
		nr_to_free = max(count / 2, (size_t)64);

		mali_mem_os_allocator.pool_count -= nr_to_free;
		list_for_each(le, &mali_mem_os_allocator.pool_pages) {
			--nr_to_free;
			if (0 == nr_to_free) break;
		}
		list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
	}
	spin_unlock(&mali_mem_os_allocator.pool_lock);

	list_for_each_entry_safe(page, tmp, &pages, lru) {
		mali_mem_os_free_page(page);
	}

	/* Release some pages from page table page pool */
	mali_mem_os_trim_page_table_page_pool();

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
	}
}
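/* Annotation: each trim run frees roughly half of the surplus above
 * MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (at least 64 pages, i.e.
 * 256 KiB) and re-queues itself while a surplus remains, so a large pool
 * decays gradually rather than being released in one burst. */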
_mali_osk_errcode_t mali_mem_os_init(void)
{
	mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
	if (NULL == mali_mem_os_allocator.wq) {
		return _MALI_OSK_ERR_NOMEM;
	}

	register_shrinker(&mali_mem_os_allocator.shrinker);

	return _MALI_OSK_ERR_OK;
}
void mali_mem_os_term(void)
{
	struct page *page, *tmp;

	unregister_shrinker(&mali_mem_os_allocator.shrinker);
	cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
	destroy_workqueue(mali_mem_os_allocator.wq);

	spin_lock(&mali_mem_os_allocator.pool_lock);
	list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
		mali_mem_os_free_page(page);

		--mali_mem_os_allocator.pool_count;
	}
	BUG_ON(mali_mem_os_allocator.pool_count);
	spin_unlock(&mali_mem_os_allocator.pool_lock);

	/* Release from page table page pool */
	do {
		size_t nr_to_free;

		spin_lock(&mali_mem_page_table_page_pool.lock);

		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);

		/* Pool lock will be released by the callee. */
		mali_mem_os_page_table_pool_free(nr_to_free);
	} while (0 != mali_mem_page_table_page_pool.count);
}
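/* Annotation: teardown order matters here. The shrinker is unregistered
 * and the delayed work cancelled before the workqueue is destroyed, so no
 * shrink or trim can run concurrently while the two pools are drained
 * above. */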
_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
{
	mali_mem_os_allocator.allocation_limit = size;

	return _MALI_OSK_ERR_OK;
}
u32 mali_mem_os_stat(void)
{
	return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
}