#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>
+#include <linux/debugfs.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include <linux/swapops.h>
+#include <linux/balloon_compaction.h>
#include "internal.h"
return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}
+static struct dentry *debug_file;
+
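+/*
+ * Dump every registered shrinker and its current object count.  The
+ * shrink_control is a pure query: all GFP flags are allowed and
+ * nr_to_scan is zero, so nothing is actually reclaimed here.
+ */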
+static int debug_shrinker_show(struct seq_file *s, void *unused)
+{
+ struct shrinker *shrinker;
+ struct shrink_control sc;
+
+ sc.gfp_mask = (gfp_t)-1;
+ sc.nr_to_scan = 0;
+
+ down_read(&shrinker_rwsem);
+ list_for_each_entry(shrinker, &shrinker_list, list) {
+ int num_objs;
+
+ num_objs = shrinker->shrink(shrinker, &sc);
+ seq_printf(s, "%pf %d\n", shrinker->shrink, num_objs);
+ }
+ up_read(&shrinker_rwsem);
+ return 0;
+}
+
+static int debug_shrinker_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_shrinker_show, inode->i_private);
+}
+
+static const struct file_operations debug_shrinker_fops = {
+ .open = debug_shrinker_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* Add a shrinker callback to be called from the vm
*/
}
EXPORT_SYMBOL(register_shrinker);
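+/* Expose the shrinker list at <debugfs>/shrinker (typically /sys/kernel/debug/shrinker) */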
+static int __init add_shrinker_debug(void)
+{
+ debug_file = debugfs_create_file("shrinker", 0644, NULL, NULL,
+ &debug_shrinker_fops);
+ return 0;
+}
+
+late_initcall(add_shrinker_debug);
+
/*
* Remove one
*/
* could easily OOM just because too many pages are in
* writeback and there is nothing else to reclaim.
*
- * Check __GFP_IO, certainly because a loop driver
+ * Require may_enter_fs because we would wait on fs, which
+ * may not have submitted IO yet. And a loop driver
* thread might enter reclaim, and deadlock if it waits
* on a page for which it is needed to do the write
* (loop masks off __GFP_IO|__GFP_FS for this reason);
* but more thought would probably show more reasons.
- *
- * Don't require __GFP_FS, since we're not going into
- * the FS, just waiting on its writeback completion.
- * Worryingly, ext4 gfs2 and xfs allocate pages with
- * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
- * testing may_enter_fs here is liable to OOM on them.
*/
if (global_reclaim(sc) ||
- !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+ !PageReclaim(page) || !may_enter_fs) {
/*
* This is slightly racy - end_page_writeback()
* might have just cleared PageReclaim, then
LIST_HEAD(clean_pages);
list_for_each_entry_safe(page, next, page_list, lru) {
- if (page_is_file_cache(page) && !PageDirty(page)) {
+ if (page_is_file_cache(page) && !PageDirty(page) &&
+ !isolated_balloon_page(page)) {
ClearPageActive(page);
list_move(&page->lru, &clean_pages);
}
{
unsigned long inactive, isolated;
- if (current_is_kswapd())
+ if (current_is_kswapd() || sc->hibernation_mode)
return 0;
if (!global_reclaim(sc))
SCAN_FILE,
};
+
+#ifdef CONFIG_ZRAM
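+/* When non-zero, bias anon/file scan priority by the relative LRU sizes when swapping to zram */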
+static int vmscan_swap_file_ratio = 1;
+module_param_named(swap_file_ratio, vmscan_swap_file_ratio, int, S_IRUGO | S_IWUSR);
+
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+
+/* vmscan debug knobs */
+static int vmscan_swap_sum = 200;
+module_param_named(swap_sum, vmscan_swap_sum, int, S_IRUGO | S_IWUSR);
+
+
+static int vmscan_scan_file_sum = 0;
+static int vmscan_scan_anon_sum = 0;
+static int vmscan_recent_scanned_anon = 0;
+static int vmscan_recent_scanned_file = 0;
+static int vmscan_recent_rotated_anon = 0;
+static int vmscan_recent_rotated_file = 0;
+module_param_named(scan_file_sum, vmscan_scan_file_sum, int, S_IRUGO);
+module_param_named(scan_anon_sum, vmscan_scan_anon_sum, int, S_IRUGO);
+module_param_named(recent_scanned_anon, vmscan_recent_scanned_anon, int, S_IRUGO);
+module_param_named(recent_scanned_file, vmscan_recent_scanned_file, int, S_IRUGO);
+module_param_named(recent_rotated_anon, vmscan_recent_rotated_anon, int, S_IRUGO);
+module_param_named(recent_rotated_file, vmscan_recent_rotated_file, int, S_IRUGO);
+
+/* #define LOGTAG "VMSCAN" */
+static unsigned long t;
+static unsigned long history[2];
+extern int lowmem_minfree[9];
+#endif /* CONFIG_ZRAM && CONFIG_MTK_LCA_RAM_OPTIMIZE */
+
+#endif /* CONFIG_ZRAM */
+
/*
* Determine how aggressively the anon and file LRU lists should be
* scanned. The relative value of each set of LRU lists is determined
bool force_scan = false;
unsigned long ap, fp;
enum lru_list lru;
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+ int cpu;
+ unsigned long SwapinCount, SwapoutCount, cached;
+ bool bThrashing = false;
+#endif
/*
* If the zone or memcg is small, nr[l] can be 0. This
anon_prio = vmscan_swappiness(sc);
file_prio = 200 - anon_prio;
+ /*
+ * With swappiness at 100, anonymous and file have the same priority.
+ * This scanning priority is essentially the inverse of IO cost.
+ */
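+ /*
+ * MTK LCA tuning: once per second, estimate swap thrashing from the
+ * PSWPIN/PSWPOUT rate and rebalance anon/file scan priority.
+ */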
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+ if (vmscan_swap_file_ratio) {
+
+ if (t == 0)
+ t = jiffies;
+
+ if (time_after(jiffies, t + 1 * HZ)) {
+
+ SwapinCount = 0;
+ SwapoutCount = 0;
+ for_each_online_cpu(cpu) {
+ struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
+ SwapinCount += this->event[PSWPIN];
+ SwapoutCount += this->event[PSWPOUT];
+ }
+
+ /* More than ~3000 swapped pages per second counts as thrashing */
+ if (((SwapinCount - history[0] + SwapoutCount - history[1]) / (jiffies - t) * HZ) > 3000)
+ bThrashing = true;
+ else
+ bThrashing = false;
+
+ history[0] = SwapinCount;
+ history[1] = SwapoutCount;
+
+ t = jiffies;
+ }
+
+ if (!bThrashing) {
+ anon_prio = (vmscan_swappiness(sc) * anon) / (anon + file + 1);
+ file_prio = (vmscan_swap_sum - vmscan_swappiness(sc)) * file / (anon + file + 1);
+ } else {
+ cached = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM) - total_swapcache_pages();
+ if (cached > lowmem_minfree[2]) {
+ anon_prio = vmscan_swappiness(sc);
+ file_prio = vmscan_swap_sum - vmscan_swappiness(sc);
+ } else {
+ anon_prio = (vmscan_swappiness(sc) * anon) / (anon + file + 1);
+ file_prio = (vmscan_swap_sum - vmscan_swappiness(sc)) * file / (anon + file + 1);
+ }
+ }
+
+ } else {
+ anon_prio = vmscan_swappiness(sc);
+ file_prio = vmscan_swap_sum - vmscan_swappiness(sc);
+ }
+#elif defined(CONFIG_ZRAM)
+ if (vmscan_swap_file_ratio) {
+ anon_prio = anon_prio * anon / (anon + file + 1);
+ file_prio = file_prio * file / (anon + file + 1);
+ }
+#endif /* CONFIG_ZRAM */
+
/*
* OK, so we have swap space and a fair amount of page cache
* pages. We use the recently rotated / recently scanned
if (zone->all_unreclaimable &&
sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
- if (IS_ENABLED(CONFIG_COMPACTION)) {
+ if (IS_ENABLED(CONFIG_COMPACTION) && !sc->hibernation_mode) {
/*
* If we already have plenty of memory free for
* compaction in this zone, don't free any more.
return aborted_reclaim;
}
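+/*
+ * The reclaimable count would be mostly accurate.
+ * The less reclaimable pages may be
+ * - mlocked pages, which will be moved to unevictable list when encountered
+ * - mapped pages, which may require several travels to be reclaimed
+ * - dirty pages, which is not "instantly" reclaimable
+ */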
+static unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+ int nr;
+
+ nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_FILE);
+
+ if (get_nr_swap_pages() > 0)
+ nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+
+ return nr;
+}
+
static bool zone_reclaimable(struct zone *zone)
{
return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
unsigned long writeback_threshold;
bool aborted_reclaim;
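+ /* Skip direct reclaim while tasks are being frozen for suspend; hibernation's own reclaim still runs */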
+#ifdef CONFIG_FREEZER
+ if (unlikely(pm_freezing && !sc->hibernation_mode))
+ return 0;
+#endif
+
delayacct_freepages_start();
if (global_reclaim(sc))
for (i = 0; i <= ZONE_NORMAL; i++) {
zone = &pgdat->node_zones[i];
+ if (!populated_zone(zone))
+ continue;
+
pfmemalloc_reserve += min_wmark_pages(zone);
free_pages += zone_page_state(zone, NR_FREE_PAGES);
}
+ /* If there are no reserves (unexpected config) then do not throttle */
+ if (!pfmemalloc_reserve)
+ return true;
+
wmark_ok = free_pages > pfmemalloc_reserve / 2;
/* kswapd must be awake if processes are being throttled */
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
nodemask_t *nodemask)
{
+ struct zoneref *z;
struct zone *zone;
- int high_zoneidx = gfp_zone(gfp_mask);
- pg_data_t *pgdat;
+ pg_data_t *pgdat = NULL;
/*
* Kernel threads should not be throttled as they may be indirectly
if (fatal_signal_pending(current))
goto out;
- /* Check if the pfmemalloc reserves are ok */
- first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
- pgdat = zone->zone_pgdat;
- if (pfmemalloc_watermark_ok(pgdat))
+ /*
+ * Check if the pfmemalloc reserves are ok by finding the first node
+ * with a usable ZONE_NORMAL or lower zone. The expectation is that
+ * GFP_KERNEL will be required for allocating network buffers when
+ * swapping over the network so ZONE_HIGHMEM is unusable.
+ *
+ * Throttling is based on the first usable node and throttled processes
+ * wait on a queue until kswapd makes progress and wakes them. There
+ * is an affinity then between processes waking up and where reclaim
+ * progress has been made assuming the process wakes on the same node.
+ * More importantly, processes running on remote nodes will not compete
+ * for remote pfmemalloc reserves and processes on different nodes
+ * should make reasonable progress.
+ */
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ gfp_mask, nodemask) {
+ if (zone_idx(zone) > ZONE_NORMAL)
+ continue;
+
+ /* Throttle based on the first usable node */
+ pgdat = zone->zone_pgdat;
+ if (pfmemalloc_watermark_ok(pgdat))
+ goto out;
+ break;
+ }
+
+ /* If no zone was usable by the allocation flags then do not throttle */
+ if (!pgdat)
goto out;
/* Account for the throttling */
return false;
/*
- * There is a potential race between when kswapd checks its watermarks
- * and a process gets throttled. There is also a potential race if
- * processes get throttled, kswapd wakes, a large process exits therby
- * balancing the zones that causes kswapd to miss a wakeup. If kswapd
- * is going to sleep, no process should be sleeping on pfmemalloc_wait
- * so wake them now if necessary. If necessary, processes will wake
- * kswapd and get throttled again
+ * The throttled processes are normally woken up in balance_pgdat() as
+ * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+ * race between when kswapd checks the watermarks and a process gets
+ * throttled. There is also a potential race if processes get
+ * throttled, kswapd wakes, a large process exits thereby balancing the
+ * zones, which causes kswapd to exit balance_pgdat() before reaching
+ * the wake up checks. If kswapd is going to sleep, no process should
+ * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+ * the wake up is premature, processes will wake kswapd and get
+ * throttled again. The difference from wake ups in balance_pgdat() is
+ * that here we are under prepare_to_wait().
*/
- if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
- wake_up(&pgdat->pfmemalloc_wait);
- return false;
- }
+ if (waitqueue_active(&pgdat->pfmemalloc_wait))
+ wake_up_all(&pgdat->pfmemalloc_wait);
return pgdat_balanced(pgdat, order, classzone_idx);
}
}
}
+ tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
current->reclaim_state = NULL;
+ lockdep_clear_current_reclaim_state();
+
return 0;
}
if (!populated_zone(zone))
return;
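+ /* Do not wake kswapd while user space is being frozen for suspend */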
+#ifdef CONFIG_FREEZER
+ if (pm_freezing)
+ return;
+#endif
+
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
return;
pgdat = zone->zone_pgdat;
wake_up_interruptible(&pgdat->kswapd_wait);
}
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
- int nr;
-
- nr = global_page_state(NR_ACTIVE_FILE) +
- global_page_state(NR_INACTIVE_FILE);
-
- if (get_nr_swap_pages() > 0)
- nr += global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_INACTIVE_ANON);
-
- return nr;
-}
-
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
- int nr;
-
- nr = zone_page_state(zone, NR_ACTIVE_FILE) +
- zone_page_state(zone, NR_INACTIVE_FILE);
-
- if (get_nr_swap_pages() > 0)
- nr += zone_page_state(zone, NR_ACTIVE_ANON) +
- zone_page_state(zone, NR_INACTIVE_ANON);
-
- return nr;
-}
-
#ifdef CONFIG_HIBERNATION
/*
* Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
* LRU order by reclaiming preferentially
* inactive > active > active referenced > active mapped
*/
-unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+unsigned long shrink_memory_mask(unsigned long nr_to_reclaim, gfp_t mask)
{
struct reclaim_state reclaim_state;
struct scan_control sc = {
- .gfp_mask = GFP_HIGHUSER_MOVABLE,
+ .gfp_mask = mask,
.may_swap = 1,
.may_unmap = 1,
.may_writepage = 1,
return nr_reclaimed;
}
+EXPORT_SYMBOL_GPL(shrink_memory_mask);
+
+#ifdef CONFIG_MTKPASR
+extern void shrink_mtkpasr_all(void);
+#else
+#define shrink_mtkpasr_all() do {} while (0)
+#endif
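+/* Keep the old shrink_all_memory() interface: let MTKPASR shrink first, then reclaim with GFP_HIGHUSER_MOVABLE */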
+unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+{
+ shrink_mtkpasr_all();
+ return shrink_memory_mask(nr_to_reclaim, GFP_HIGHUSER_MOVABLE);
+}
+EXPORT_SYMBOL_GPL(shrink_all_memory);
#endif /* CONFIG_HIBERNATION */
/* It's optimal to keep kswapds on the same CPUs as their memory, but
device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
#endif
+
+#ifdef CONFIG_MTKPASR
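+/*
+ * Ask every registered shrinker to drop what it can: query its object
+ * count, then request that many objects, making up to two shrink passes
+ * while objects remain.
+ */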
+void try_to_shrink_slab(void)
+{
+ struct shrinker *shrinker;
+ struct shrink_control shrink = {
+ .gfp_mask = GFP_KERNEL|__GFP_HIGHMEM,
+ };
+
+ if (!down_read_trylock(&shrinker_rwsem))
+ return;
+
+ list_for_each_entry(shrinker, &shrinker_list, list) {
+ int num_objs;
+ int shrink_ret = 0;
+ int retry = 2;
+
+ num_objs = do_shrinker_shrink(shrinker, &shrink, 0);
+ if (num_objs <= 0)
+ continue;
+
+ do {
+ /* To shrink */
+ shrink_ret = do_shrinker_shrink(shrinker, &shrink, num_objs);
+ if (shrink_ret == -1)
+ break;
+ /* Check empty */
+ num_objs = do_shrinker_shrink(shrinker, &shrink, 0);
+ if (num_objs <= 0)
+ break;
+ } while (--retry);
+ }
+
+ up_read(&shrinker_rwsem);
+}
+
+extern void free_hot_cold_page(struct page *page, int cold);
+/* Isolate pages for PASR */
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+int mtkpasr_isolate_page(struct page *page, int check_swap)
+#else
+int mtkpasr_isolate_page(struct page *page)
+#endif
+{
+ struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
+ unsigned long flags;
+ isolate_mode_t mode = ISOLATE_ASYNC_MIGRATE;
+
+ /* Lock this zone - use the trylock version so we never block here */
+ if (!spin_trylock_irqsave(&zone->lru_lock, flags)) {
+ printk(KERN_ALERT "[%s][%d] Failed to lock this zone!\n", __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+ /* Check whether we should handle SwapBacked, SwapCache pages */
+ if (check_swap) {
+ if (PageSwapBacked(page) || PageSwapCache(page)) {
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ return -EACCES;
+ }
+ }
+#endif
+
+ /* Try to isolate this page */
+ if (__isolate_lru_page(page, mode) != 0) {
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ return -EACCES;
+ }
+
+ /* Successfully isolated */
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+ del_page_from_lru_list(page, lruvec, page_lru(page));
+
+ /* Unlock this zone */
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+
+ return 0;
+}
+
+/* Drop a page from the file/anon LRUs, imitating the behavior of shrink_page_list(). */
+/* On error the caller must put the page back on its LRU itself. */
+int mtkpasr_drop_page(struct page *page)
+{
+ int ret;
+ unsigned long vm_flags = 0x0;
+ bool active = false;
+ struct address_space *mapping;
+ enum ttu_flags unmap_flags = TTU_UNMAP;
+
+ /* Suitable scan control */
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .order = PAGE_ALLOC_COSTLY_ORDER + 1,
+ /* .reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_SYNC, we only handle "SwapBacked" pages in this mode! */
+ };
+
+ /* Try to isolate this page */
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+ ret = mtkpasr_isolate_page(page, 0x1);
+#else
+ ret = mtkpasr_isolate_page(page);
+#endif
+ if (ret) {
+ return ret;
+ }
+
+ /* Check whether it is evictable! */
+ if (unlikely(!page_evictable(page))) {
+ putback_lru_page(page);
+ return -EACCES;
+ }
+
+ /* If the page is active, clear the flag now; it is restored on the putback path */
+ if (PageActive(page)) {
+ active = TestClearPageActive(page);
+ }
+
+ /* If we fail to lock this page, ignore it */
+ if (!trylock_page(page)) {
+ goto putback;
+ }
+
+ /* If page is in writeback, we don't handle it here! */
+ if (PageWriteback(page)) {
+ goto unlock;
+ }
+
+ /*
+ * Anonymous process memory has backing store?
+ * Try to allocate it some swap space here.
+ */
+ if (PageAnon(page) && !PageSwapCache(page)) {
+ /* Skip if swap space is already nearly full */
+ if (vm_swap_full()) {
+ goto unlock;
+ }
+
+ /* Ok! It is safe to add this page to swap. */
+ if (!add_to_swap(page, NULL)) {
+ goto unlock;
+ }
+ }
+
+ /* We don't handle dirty file cache here (Related devices may be suspended) */
+ if (page_is_file_cache(page)) {
+ /* How do we handle pages in VM_EXEC vmas? */
+ if ((vm_flags & VM_EXEC)) {
+ goto unlock;
+ }
+ /* We don't handle dirty file pages! */
+ if (PageDirty(page)) {
+#ifdef CONFIG_MTKPASR_DEBUG
+ printk(KERN_ALERT "\n\n\n\n\n\n [%s][%d]\n\n\n\n\n\n",__FUNCTION__,__LINE__);
+#endif
+ goto unlock;
+ }
+ }
+
+ /*
+ * The page is mapped into the page tables of one or more
+ * processes. Try to unmap it here.
+ */
+ mapping = page_mapping(page);
+ if (page_mapped(page) && mapping) {
+#if 0
+ /* Indicate unmap action for SwapBacked pages */
+ if (PageSwapBacked(page)) {
+ unmap_flags |= TTU_IGNORE_ACCESS;
+ }
+#endif
+ /* To unmap */
+ switch (try_to_unmap(page, unmap_flags)) {
+ case SWAP_SUCCESS:
+ /* try to free the page below */
+ break;
+ case SWAP_FAIL:
+ case SWAP_AGAIN:
+ case SWAP_MLOCK:
+ goto restore_swap;
+ }
+ }
+
+ /*
+ * Check whether it is dirtied.  Dirty file pages were filtered out
+ * above, so any dirty page left here should be swap-backed
+ * (VM_BUG_ON(!PageSwapBacked(page)) would hold).
+ */
+ if (PageDirty(page)) {
+ /* Page is dirty, try to write it out here */
+ /* It's ok for zram swap! */
+ /* Should we need to apply GFP_IOFS? */
+ switch (pageout(page, mapping, &sc)) {
+ case PAGE_SUCCESS:
+ if (PageWriteback(page)) {
+ goto putback;
+ }
+ if (PageDirty(page)) {
+ goto putback;
+ }
+
+ /*
+ * A synchronous write - probably a ramdisk. Go
+ * ahead and try to reclaim the page.
+ */
+ if (!trylock_page(page)) {
+ goto putback;
+ }
+ if (PageDirty(page) || PageWriteback(page)) {
+ goto unlock;
+ }
+ mapping = page_mapping(page);
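+ /* fall through */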
+ case PAGE_CLEAN:
+ /* try to free the page below */
+ break;
+ default:
+#ifdef CONFIG_MTKPASR_DEBUG
+ /*printk(KERN_ALERT "\n\n\n\n\n\n [%s][%d]\n\n\n\n\n\n",__FUNCTION__,__LINE__);*/
+#endif
+ goto restore_unmap;
+ }
+ }
+
+ /* Release buffer */
+ if (page_has_private(page)) {
+ if (!try_to_release_page(page, sc.gfp_mask)) {
+ goto unlock;
+ }
+ if (!mapping && page_count(page) == 1) {
+ unlock_page(page);
+ if (put_page_testzero(page)) {
+ goto freeit;
+ } else {
+ /* Race! TOCHECK */
+ printk(KERN_ALERT "\n\n\n\n\n\n [%s][%d] RACE!!\n\n\n\n\n\n",__FUNCTION__,__LINE__);
+ goto notask;
+ }
+ }
+ }
+ if (!mapping || !__remove_mapping(mapping, page)) {
+ goto unlock;
+ }
+
+ __clear_page_locked(page);
+
+freeit:
+ free_hot_cold_page(page, 0);
+ return 0;
+
+restore_unmap:
+ /* Nothing extra to undo here; fall through and release any swap we allocated */
+
+restore_swap:
+ if (PageSwapCache(page))
+ try_to_free_swap(page);
+
+unlock:
+ unlock_page(page);
+
+putback:
+ /* Activate it again if needed! */
+ if (active)
+ SetPageActive(page);
+
+ /*
+ * We don't put the page back on its LRU here, because the caller wants
+ * to do more work with it outside this function:
+ * putback_lru_page(page);
+ */
+
+ /* The page could not be dropped - let the caller migrate it instead */
+ return -EBUSY;
+
+notask:
+ return 0;
+}
+#endif