drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_os_alloc.c
/*
 * This confidential and proprietary software may be used only as
 * authorised by a licensing agreement from ARM Limited
 * (C) COPYRIGHT 2013 ARM Limited
 * ALL RIGHTS RESERVED
 * The entire notice above must be reproduced on all authorised
 * copies and copies may only be made to the extent permitted
 * by a licensing agreement from ARM Limited.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include "mali_osk.h"
#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_kernel_linux.h"

/* Minimum size of allocator page pool */
#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
#endif
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
#endif
static void mali_mem_os_trim_pool(struct work_struct *work);

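/*
 * Global state for the OS memory allocator: a pool of pre-allocated,
 * device-mapped pages protected by pool_lock, a counter of pages handed out
 * to sessions, and a shrinker plus a deferred work item that trim the pool
 * when it grows beyond MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES.
 */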
static struct mali_mem_os_allocator {
	spinlock_t pool_lock;
	struct list_head pool_pages;
	size_t pool_count;

	atomic_t allocated_pages;
	size_t allocation_limit;

	struct shrinker shrinker;
	struct delayed_work timed_shrinker;
	struct workqueue_struct *wq;
} mali_mem_os_allocator = {
	.pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
	.pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
	.pool_count = 0,

	.allocated_pages = ATOMIC_INIT(0),
	.allocation_limit = 0,

	.shrinker.shrink = mali_mem_os_shrink,
	.shrinker.seeks = DEFAULT_SEEKS,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
	.timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
#else
	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
#endif
};

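/*
 * Return all pages of an allocation to the global page pool. The pages are
 * not unmapped or freed here; if the pool has grown past the kernel buffer
 * limit, the deferred trim work is scheduled to shrink it later.
 */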
static void mali_mem_os_free(mali_mem_allocation *descriptor)
{
	LIST_HEAD(pages);

	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

	atomic_sub(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);

	/* Put pages on pool. */
	list_cut_position(&pages, &descriptor->os_mem.pages, descriptor->os_mem.pages.prev);

	spin_lock(&mali_mem_os_allocator.pool_lock);

	list_splice(&pages, &mali_mem_os_allocator.pool_pages);
	mali_mem_os_allocator.pool_count += descriptor->os_mem.count;

	spin_unlock(&mali_mem_os_allocator.pool_lock);

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
	}
}

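/*
 * Back the allocation with enough pages for 'size' bytes. Pages are taken
 * from the pool first; any remainder is allocated with alloc_page() and
 * DMA-mapped (flushed) towards the device, with the DMA address stored in
 * page->private. Returns 0 on success or -ENOMEM, in which case any pages
 * already obtained are returned to the pool.
 */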
static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
{
	struct page *new_page, *tmp;
	LIST_HEAD(pages);
	size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
	size_t remaining = page_count;
	u32 i;

	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

	INIT_LIST_HEAD(&descriptor->os_mem.pages);
	descriptor->os_mem.count = page_count;

	/* Grab pages from pool. */
	{
		size_t pool_pages;
		spin_lock(&mali_mem_os_allocator.pool_lock);
		pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
		for (i = pool_pages; i > 0; i--) {
			BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
			list_move(mali_mem_os_allocator.pool_pages.next, &pages);
		}
		mali_mem_os_allocator.pool_count -= pool_pages;
		remaining -= pool_pages;
		spin_unlock(&mali_mem_os_allocator.pool_lock);
	}

	/* Process pages from pool. */
	i = 0;
	list_for_each_entry_safe(new_page, tmp, &pages, lru) {
		BUG_ON(NULL == new_page);

		list_move_tail(&new_page->lru, &descriptor->os_mem.pages);
	}

	/* Allocate new pages, if needed. */
	for (i = 0; i < remaining; i++) {
		dma_addr_t dma_addr;

		new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);

		if (unlikely(NULL == new_page)) {
			/* Calculate the number of pages actually allocated, and free them. */
			descriptor->os_mem.count = (page_count - remaining) + i;
			atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
			mali_mem_os_free(descriptor);
			return -ENOMEM;
		}

		/* Ensure page is flushed from CPU caches. */
		dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

		/* Store page phys addr */
		SetPagePrivate(new_page);
		set_page_private(new_page, dma_addr);

		list_add_tail(&new_page->lru, &descriptor->os_mem.pages);
	}

	atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
	}

	return 0;
}

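/*
 * Map the allocation's pages into the session's Mali MMU page directory at
 * descriptor->mali_mapping.addr, one MMU page at a time, using the DMA
 * address stored in page->private as the physical address.
 */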
static int mali_mem_os_mali_map(mali_mem_allocation *descriptor, struct mali_session_data *session)
{
	struct mali_page_directory *pagedir = session->page_directory;
	struct page *page;
	_mali_osk_errcode_t err;
	u32 virt = descriptor->mali_mapping.addr;
	u32 prop = descriptor->mali_mapping.properties;

	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);

	err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != err) {
		return -ENOMEM;
	}

	list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
		u32 phys = page_private(page);
		mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
		virt += MALI_MMU_PAGE_SIZE;
	}

	return 0;
}

static void mali_mem_os_mali_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
{
	mali_mem_mali_map_free(descriptor);
}

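/*
 * Map the allocation into the calling process's address space. vm_insert_pfn()
 * is used instead of vm_insert_page() to avoid a per-page dcache flush; see
 * the comment in the loop below.
 */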
static int mali_mem_os_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma)
{
	struct page *page;
	int ret;
	unsigned long addr = vma->vm_start;

	list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
		/* We should use vm_insert_page, but it does a dcache
		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
		ret = vm_insert_page(vma, addr, page);
		*/
		ret = vm_insert_pfn(vma, addr, page_to_pfn(page));

		if (unlikely(0 != ret)) {
			return -EFAULT;
		}
		addr += _MALI_OSK_MALI_PAGE_SIZE;
	}

	return 0;
}

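/*
 * Allocate and fully map an OS memory block: create a descriptor, back it
 * with pages, map it on the Mali MMU and then into the user's VMA. Fails
 * with NULL if the per-driver allocation limit would be exceeded or if any
 * of the allocation/mapping steps fail.
 */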
mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
{
	mali_mem_allocation *descriptor;
	int err;

	if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
		MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
				     size,
				     atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
				     mali_mem_os_allocator.allocation_limit));
		return NULL;
	}

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_OS);
	if (NULL == descriptor) return NULL;

	descriptor->mali_mapping.addr = mali_addr;
	descriptor->size = size;
	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
	descriptor->cpu_mapping.ref = 1;

	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	} else {
		/* Cached Mali memory mapping */
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		vma->vm_flags |= VM_SHARED;
	}

	err = mali_mem_os_alloc_pages(descriptor, size); /* Allocate pages */
	if (0 != err) goto alloc_failed;

	/* Take session memory lock */
	_mali_osk_mutex_wait(session->memory_lock);

	err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
	if (0 != err) goto mali_map_failed;

	_mali_osk_mutex_signal(session->memory_lock);

	err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
	if (0 != err) goto cpu_map_failed;

	return descriptor;

cpu_map_failed:
	/* The session memory lock was already released after the Mali mapping
	 * succeeded; re-take it before undoing that mapping, so the
	 * fall-through below releases it exactly once. */
	_mali_osk_mutex_wait(session->memory_lock);
	mali_mem_os_mali_unmap(session, descriptor);
mali_map_failed:
	_mali_osk_mutex_signal(session->memory_lock);
	mali_mem_os_free(descriptor);
alloc_failed:
	mali_mem_descriptor_destroy(descriptor);
	MALI_DEBUG_PRINT(2, ("OS allocator: Failed to allocate memory (%d)\n", err));
	return NULL;
}

void mali_mem_os_release(mali_mem_allocation *descriptor)
{
	struct mali_session_data *session = descriptor->session;

	/* Unmap the memory from the mali virtual address space. */
	mali_mem_os_mali_unmap(session, descriptor);

	/* Free pages */
	mali_mem_os_free(descriptor);
}

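/*
 * Small cache of MMU page table pages. Each entry holds the physical address
 * and kernel mapping of one write-combined page obtained from
 * dma_alloc_writecombine(), so page tables can be recycled without going back
 * to the DMA allocator for every request.
 */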
#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
static struct {
	struct {
		u32 phys;
		mali_io_address mapping;
	} page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
	u32 count;
	spinlock_t lock;
} mali_mem_page_table_page_pool = {
	.count = 0,
	.lock = __SPIN_LOCK_UNLOCKED(pool_lock),
};

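/*
 * Hand out one page table page, preferring the pool; fall back to a fresh
 * dma_alloc_writecombine() allocation when the pool is empty.
 */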
_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
{
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;

	spin_lock(&mali_mem_page_table_page_pool.lock);
	if (0 < mali_mem_page_table_page_pool.count) {
		u32 i = --mali_mem_page_table_page_pool.count;
		*phys = mali_mem_page_table_page_pool.page[i].phys;
		*mapping = mali_mem_page_table_page_pool.page[i].mapping;

		ret = _MALI_OSK_ERR_OK;
	}
	spin_unlock(&mali_mem_page_table_page_pool.lock);

	if (_MALI_OSK_ERR_OK != ret) {
		*mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
		if (NULL != *mapping) {
			ret = _MALI_OSK_ERR_OK;
		}
	}

	if (ret != _MALI_OSK_ERR_OK) {
		MALI_DEBUG_PRINT(2, ("os_get_table_page fail: ret=%d, *mapping=%x\n", ret, *mapping));
		MALI_DEBUG_PRINT(2, ("os_get_table_page fail: mali_mem_page_table_page_pool.count=%x\n", mali_mem_page_table_page_pool.count));
	}
	return ret;
}

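/*
 * Return a page table page: cache it in the pool if there is room, otherwise
 * free it back to the DMA allocator.
 */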
void mali_mem_os_release_table_page(u32 phys, void *virt)
{
	spin_lock(&mali_mem_page_table_page_pool.lock);
	if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
		u32 i = mali_mem_page_table_page_pool.count;
		mali_mem_page_table_page_pool.page[i].phys = phys;
		mali_mem_page_table_page_pool.page[i].mapping = virt;

		++mali_mem_page_table_page_pool.count;

		spin_unlock(&mali_mem_page_table_page_pool.lock);
	} else {
		spin_unlock(&mali_mem_page_table_page_pool.lock);

		dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
	}
}

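/*
 * Free one page previously set up by mali_mem_os_alloc_pages(): undo the DMA
 * mapping recorded in page->private, clear the private flag and release the
 * page. Only called for pages with no other users.
 */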
static void mali_mem_os_free_page(struct page *page)
{
	BUG_ON(page_count(page) != 1);

	dma_unmap_page(&mali_platform_device->dev, page_private(page),
		       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

	ClearPagePrivate(page);

	__free_page(page);
}

/* The maximum number of page table pool pages to free in one go. */
#define MALI_MEM_OS_CHUNK_TO_FREE 64UL

/* Free a certain number of pages from the page table page pool.
 * The pool lock must be held when calling the function, and the lock will be
 * released before returning.
 */
static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
{
	u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
	void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
	u32 i;

	MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);

	/* Remove nr_to_free pages from the pool and store them locally on stack. */
	for (i = 0; i < nr_to_free; i++) {
		u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;

		phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
		virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
	}

	mali_mem_page_table_page_pool.count -= nr_to_free;

	spin_unlock(&mali_mem_page_table_page_pool.lock);

	/* After releasing the spinlock: free the pages we removed from the pool. */
	for (i = 0; i < nr_to_free; i++) {
		dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
	}
}

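/*
 * Shrink the page table page pool down to a target that scales with the size
 * of the general page pool, plus a small reserve for new sessions. Bails out
 * without trimming if the pool lock cannot be taken immediately.
 */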
static void mali_mem_os_trim_page_table_page_pool(void)
{
	size_t nr_to_free = 0;
	size_t nr_to_keep;

	/* Keep 2 page table pages for each 1024 pages in the page cache. */
	nr_to_keep = mali_mem_os_allocator.pool_count / 512;
	/* And a minimum of eight pages, to accommodate new sessions. */
	nr_to_keep += 8;

	if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;

	if (nr_to_keep < mali_mem_page_table_page_pool.count) {
		nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
	}

	/* Pool lock will be released by the callee. */
	mali_mem_os_page_table_pool_free(nr_to_free);
}

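/*
 * Memory shrinker callback. With nr_to_scan == 0 it only reports how many
 * pages could be reclaimed (the size of the general page pool); otherwise it
 * frees up to nr_to_scan pages from the pool and returns the new pool size.
 * The signature follows the shrinker API of the running kernel version.
 */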
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
#endif
#else
static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
#endif
{
	struct page *page, *tmp;
	unsigned long flags;
	struct list_head *le, pages;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
	int nr = nr_to_scan;
#else
	int nr = sc->nr_to_scan;
#endif

	if (0 == nr) {
		/* [BUGFIX]-Mod-BEGIN by SCDTABLET.zhangku.guo@tcl.com, 06/09/2015, 1017702, mtk patch to fix ANR issue when TAT */
		/* return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count; */
		return mali_mem_os_allocator.pool_count;
		/* [BUGFIX]-Mod-END by SCDTABLET.zhangku.guo@tcl.com */
	}

	if (0 == mali_mem_os_allocator.pool_count) {
		/* No pages available */
		return 0;
	}

	if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
		/* Not able to lock. */
		return -1;
	}

	/* Release from general page pool */
	nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
	mali_mem_os_allocator.pool_count -= nr;
	list_for_each(le, &mali_mem_os_allocator.pool_pages) {
		--nr;
		if (0 == nr) break;
	}
	list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
	spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);

	list_for_each_entry_safe(page, tmp, &pages, lru) {
		mali_mem_os_free_page(page);
	}

	/* [BUGFIX]-Del-BEGIN by SCDTABLET.zhangku.guo@tcl.com, 06/09/2015, 1017702, mtk patch to fix ANR issue when TAT */
	/* Release some pages from page table page pool */
	/* mali_mem_os_trim_page_table_page_pool(); */
	/* [BUGFIX]-Del-END by SCDTABLET.zhangku.guo@tcl.com */

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
		/* Pools are empty, stop timer */
		MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
	}

	/* [BUGFIX]-Mod-BEGIN by SCDTABLET.zhangku.guo@tcl.com, 06/09/2015, 1017702, mtk patch to fix ANR issue when TAT */
	/* return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count; */
	return mali_mem_os_allocator.pool_count;
	/* [BUGFIX]-Mod-END by SCDTABLET.zhangku.guo@tcl.com */
}

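/*
 * Deferred work that trims the general page pool back towards
 * MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES, then trims the page table page
 * pool. Re-queues itself while the pool is still above the limit.
 */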
static void mali_mem_os_trim_pool(struct work_struct *data)
{
	struct page *page, *tmp;
	struct list_head *le;
	LIST_HEAD(pages);
	size_t nr_to_free;

	MALI_IGNORE(data);

	MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));

	/* Release from general page pool */
	spin_lock(&mali_mem_os_allocator.pool_lock);
	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
		/* Free half the pages on the pool above the static limit. Or 64 pages, 256KB. */
		nr_to_free = max(count / 2, (size_t)64);

		mali_mem_os_allocator.pool_count -= nr_to_free;
		list_for_each(le, &mali_mem_os_allocator.pool_pages) {
			--nr_to_free;
			if (0 == nr_to_free) break;
		}
		list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
	}
	spin_unlock(&mali_mem_os_allocator.pool_lock);

	list_for_each_entry_safe(page, tmp, &pages, lru) {
		mali_mem_os_free_page(page);
	}

	/* Release some pages from page table page pool */
	mali_mem_os_trim_page_table_page_pool();

	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
		MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
	}
}

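/*
 * Set up the allocator: create the unbound workqueue used for deferred pool
 * trimming and register the memory shrinker with the kernel.
 */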
_mali_osk_errcode_t mali_mem_os_init(void)
{
	mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
	if (NULL == mali_mem_os_allocator.wq) {
		return _MALI_OSK_ERR_NOMEM;
	}

	register_shrinker(&mali_mem_os_allocator.shrinker);

	return _MALI_OSK_ERR_OK;
}

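/*
 * Tear down the allocator: unregister the shrinker, stop any pending trim
 * work, destroy the workqueue and drain both page pools.
 */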
void mali_mem_os_term(void)
{
	struct page *page, *tmp;

	unregister_shrinker(&mali_mem_os_allocator.shrinker);
	cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
	destroy_workqueue(mali_mem_os_allocator.wq);

	spin_lock(&mali_mem_os_allocator.pool_lock);
	list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
		mali_mem_os_free_page(page);

		--mali_mem_os_allocator.pool_count;
	}
	BUG_ON(mali_mem_os_allocator.pool_count);
	spin_unlock(&mali_mem_os_allocator.pool_lock);

	/* Release from page table page pool */
	do {
		u32 nr_to_free;

		spin_lock(&mali_mem_page_table_page_pool.lock);

		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);

		/* Pool lock will be released by the callee. */
		mali_mem_os_page_table_pool_free(nr_to_free);
	} while (0 != mali_mem_page_table_page_pool.count);
}

_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
{
	mali_mem_os_allocator.allocation_limit = size;

	MALI_SUCCESS;
}

u32 mali_mem_os_stat(void)
{
	return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
}