return 0;
/* offset in TRBs */
segment_offset = trb - seg->trbs;
- if (segment_offset > TRBS_PER_SEGMENT)
+ if (segment_offset >= TRBS_PER_SEGMENT)
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
* carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
+ #ifdef CONFIG_MTK_XHCI
+ if (!xhci_link_trb_quirk(xhci)) {
+ #else
if (!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST))
&& !xhci_link_trb_quirk(xhci)) {
+ #endif
next->link.control &=
cpu_to_le32(~TRB_CHAIN);
next->link.control |=
cpu_to_le32(chain);
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs)
{
+#ifndef CONFIG_MTK_XHCI
int num_trbs_in_deq_seg;
+#endif
if (ring->num_trbs_free < num_trbs)
return 0;
+#ifndef CONFIG_MTK_XHCI
if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
return 0;
}
+#endif
return 1;
}
if (urb_priv->td_cnt == urb_priv->length) {
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ #ifndef CONFIG_MTK_XHCI
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
+ #endif
}
usb_hcd_unlink_urb_from_ep(hcd, urb);
ret = 1;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ #ifndef CONFIG_MTK_XHCI
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
== 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
+ #endif
}
}
}
* successful event after a short transfer.
* Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
ret = 0;
/* If we're not dealing with 0.95 hardware or isoc rings
* on AMD 0.96 host, clear the chain bit.
*/
+ #ifndef CONFIG_MTK_XHCI
if (!xhci_link_trb_quirk(xhci) &&
!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
next->link.control &= cpu_to_le32(~TRB_CHAIN);
else
next->link.control |= cpu_to_le32(TRB_CHAIN);
-
+ #else
+ next->link.control &= cpu_to_le32(~TRB_CHAIN);
+ #endif
wmb();
next->link.control ^= cpu_to_le32(TRB_CYCLE);
* right shifted by 10.
* It must fit in bits 21:17, so it can't be bigger than 31.
*/
+#ifdef CONFIG_MTK_XHCI
+static u32 xhci_td_remainder(unsigned int td_transfer_size,
+		unsigned int td_running_total, unsigned int maxp,
+		unsigned int trb_buffer_length)
+{
+	u32 max = 31;
+	int remainder, td_packet_count, packet_transferred;
+
+	/* The last TRB of a TD always carries a TD size of 0 */
+	/* FIXME: need to work around a ZLP at the end of this TD */
+	if (td_running_total + trb_buffer_length == td_transfer_size)
+		return 0;
+
+	/* FIXME: need to take care of high-bandwidth (MAX_ESIT) endpoints */
+	packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp;
+	td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
+	remainder = td_packet_count - packet_transferred;
+
+	if (remainder > max)
+		return max << 17;
+	else
+		return remainder << 17;
+}
+#else
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
+#endif
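+
+/*
+ * Illustrative example (values assumed, not taken from the driver): with
+ * maxp = 512, td_transfer_size = 3072 and td_running_total = 1024, the MTK
+ * variant above computes td_packet_count = DIV_ROUND_UP(3072, 512) = 6 and
+ * packet_transferred = 1024 / 512 = 2, so it returns (6 - 2) << 17, i.e. a
+ * TD size of 4 packets left, placed in bits 21:17 of the TRB length field.
+ * Any remainder above 31 is clamped to 31, matching the comment above.
+ */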
+
+#ifndef CONFIG_MTK_XHCI
/*
* For xHCI 1.0 host controllers, TD size is the number of max packet sized
* packets remaining in the TD (*not* including this TRB).
	if ((total_packet_count - packets_transferred) > 31)
		return 31 << 17;
	return (total_packet_count - packets_transferred) << 17;
}
+#endif
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
(unsigned int) addr + trb_buff_len);
}
+	/* Set the TRB length, TD size, and interrupter fields. */
+#ifdef CONFIG_MTK_XHCI
+	if (num_trbs > 1) {
+		remainder = xhci_td_remainder(urb->transfer_buffer_length,
+				running_total, urb->ep->desc.wMaxPacketSize,
+				trb_buff_len);
+	}
+#else
		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb,
					num_trbs - 1);
		}
+#endif
+
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
bool more_trbs_coming;
int start_cycle;
u32 field, length_field;
-
+#ifdef CONFIG_MTK_XHCI
+	int max_packet = 0;
+#endif
int running_total, trb_buff_len, ret;
unsigned int total_packet_count;
u64 addr;
}
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+#ifdef CONFIG_MTK_XHCI
+	switch (urb->dev->speed) {
+	case USB_SPEED_SUPER:
+		max_packet = urb->ep->desc.wMaxPacketSize;
+		break;
+	case USB_SPEED_HIGH:
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+	default:
+		max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
+		break;
+	}
+	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
+	    (urb->transfer_buffer_length % max_packet) == 0) {
+		/* Reserve one more TRB so the trailing zero-length packet can be queued */
+		num_trbs++;
+	}
+#endif
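+
+	/*
+	 * Illustrative example (values assumed): for a 1024-byte bulk OUT URB
+	 * with max_packet = 512 and URB_ZERO_PACKET set, the data fills exactly
+	 * two packets, so the block above reserves one extra TRB so the
+	 * terminating zero-length packet can be queued.
+	 */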
+
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
-
+#ifdef CONFIG_MTK_XHCI
+		remainder = xhci_td_remainder(urb->transfer_buffer_length,
+				running_total, max_packet, trb_buff_len);
+#else
		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb,
					num_trbs - 1);
		}
+#endif
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
field |= 0x1;
/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+#ifdef CONFIG_MTK_XHCI
+	if (1) {
+#else
	if (xhci->hci_version == 0x100) {
+#endif
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
field = TRB_TYPE(TRB_DATA);
length_field = TRB_LEN(urb->transfer_buffer_length) |
+#ifdef CONFIG_MTK_XHCI
+			/* MTK: the control data stage is a single TRB (no scatter-gather), so TD size is 0 */
+			0 |
+#else
			xhci_td_remainder(urb->transfer_buffer_length) |
+#endif
TRB_INTR_TARGET(0);
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
u64 start_addr, addr;
int i, j;
bool more_trbs_coming;
+#ifdef CONFIG_MTK_XHCI
+	int max_packet = 0;
+#endif
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
+#ifdef CONFIG_MTK_XHCI
+	switch (urb->dev->speed) {
+	case USB_SPEED_SUPER:
+		max_packet = urb->ep->desc.wMaxPacketSize;
+		break;
+	case USB_SPEED_HIGH:
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+	default:
+		max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
+		break;
+	}
+#endif
urb_priv = urb->hcpriv;
/* Queue the first TRB, even if it's zero-length */
for (i = 0; i < num_tds; i++) {
trb_buff_len = td_remain_len;
/* Set the TRB length, TD size, & interrupter fields. */
+#ifdef CONFIG_MTK_XHCI
+			remainder = xhci_td_remainder(urb->transfer_buffer_length,
+					running_total, max_packet, trb_buff_len);
+#else
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb,
						(trbs_per_td - j - 1));
			}
+#endif
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
goto cleanup;
}
}
-
+ #ifndef CONFIG_MTK_XHCI
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_disable();
}
+ #endif
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>
+#include <linux/debugfs.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}
+static struct dentry *debug_file;
+
+static int debug_shrinker_show(struct seq_file *s, void *unused)
+{
+ struct shrinker *shrinker;
+ struct shrink_control sc;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ down_read(&shrinker_rwsem);
+ list_for_each_entry(shrinker, &shrinker_list, list) {
+ int num_objs;
+
+ num_objs = shrinker->shrink(shrinker, &sc);
+ seq_printf(s, "%pf %d\n", shrinker->shrink, num_objs);
+ }
+ up_read(&shrinker_rwsem);
+ return 0;
+}
+
+static int debug_shrinker_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_shrinker_show, inode->i_private);
+}
+
+static const struct file_operations debug_shrinker_fops = {
+ .open = debug_shrinker_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
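+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): reading
+ * /sys/kernel/debug/shrinker calls each registered shrinker with
+ * nr_to_scan == 0, which by convention only reports the number of freeable
+ * objects, and prints one "<function> <count>" line per shrinker without
+ * actually reclaiming anything.
+ */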
+
/*
* Add a shrinker callback to be called from the vm
*/
}
EXPORT_SYMBOL(register_shrinker);
+static int __init add_shrinker_debug(void)
+{
+	debug_file = debugfs_create_file("shrinker", 0644, NULL, NULL,
+					 &debug_shrinker_fops);
+	return 0;
+}
+
+late_initcall(add_shrinker_debug);
+
/*
* Remove one
*/
* could easily OOM just because too many pages are in
* writeback and there is nothing else to reclaim.
*
- * Check __GFP_IO, certainly because a loop driver
+ * Require may_enter_fs to wait on writeback, because
+ * fs may not have submitted IO yet. And a loop driver
* thread might enter reclaim, and deadlock if it waits
* on a page for which it is needed to do the write
* (loop masks off __GFP_IO|__GFP_FS for this reason);
* but more thought would probably show more reasons.
- *
- * Don't require __GFP_FS, since we're not going into
- * the FS, just waiting on its writeback completion.
- * Worryingly, ext4 gfs2 and xfs allocate pages with
- * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
- * testing may_enter_fs here is liable to OOM on them.
*/
if (global_reclaim(sc) ||
- !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+ !PageReclaim(page) || !may_enter_fs) {
/*
* This is slightly racy - end_page_writeback()
* might have just cleared PageReclaim, then
{
unsigned long inactive, isolated;
- if (current_is_kswapd())
+ if (current_is_kswapd() || sc->hibernation_mode)
return 0;
if (!global_reclaim(sc))
SCAN_FILE,
};
+
+#ifdef CONFIG_ZRAM
+static int vmscan_swap_file_ratio = 1;
+module_param_named(swap_file_ratio, vmscan_swap_file_ratio, int, S_IRUGO | S_IWUSR);
+
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+
+/* vmscan debug knobs */
+static int vmscan_swap_sum = 200;
+module_param_named(swap_sum, vmscan_swap_sum, int, S_IRUGO | S_IWUSR);
+
+static int vmscan_scan_file_sum = 0;
+static int vmscan_scan_anon_sum = 0;
+static int vmscan_recent_scanned_anon = 0;
+static int vmscan_recent_scanned_file = 0;
+static int vmscan_recent_rotated_anon = 0;
+static int vmscan_recent_rotated_file = 0;
+module_param_named(scan_file_sum, vmscan_scan_file_sum, int, S_IRUGO);
+module_param_named(scan_anon_sum, vmscan_scan_anon_sum, int, S_IRUGO);
+module_param_named(recent_scanned_anon, vmscan_recent_scanned_anon, int, S_IRUGO);
+module_param_named(recent_scanned_file, vmscan_recent_scanned_file, int, S_IRUGO);
+module_param_named(recent_rotated_anon, vmscan_recent_rotated_anon, int, S_IRUGO);
+module_param_named(recent_rotated_file, vmscan_recent_rotated_file, int, S_IRUGO);
+#endif /* CONFIG_ZRAM && CONFIG_MTK_LCA_RAM_OPTIMIZE */
+
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+/* #define LOGTAG "VMSCAN" */
+/* Timestamp and swap in/out history used for thrashing detection */
+static unsigned long t;
+static unsigned long history[2] = {0};
+extern int lowmem_minfree[9];
+#endif
+
+#endif /* CONFIG_ZRAM */
+
/*
* Determine how aggressively the anon and file LRU lists should be
* scanned. The relative value of each set of LRU lists is determined
bool force_scan = false;
unsigned long ap, fp;
enum lru_list lru;
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+	int cpu;
+	unsigned long SwapinCount = 0, SwapoutCount = 0, cached;
+	bool bThrashing = false;
+#endif
/*
* If the zone or memcg is small, nr[l] can be 0. This
anon_prio = vmscan_swappiness(sc);
file_prio = 200 - anon_prio;
+ /*
+ * With swappiness at 100, anonymous and file have the same priority.
+ * This scanning priority is essentially the inverse of IO cost.
+ */
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+	if (vmscan_swap_file_ratio) {
+
+		if (t == 0)
+			t = jiffies;
+
+		if (time_after(jiffies, t + 1 * HZ)) {
+
+			/* Sum swap-in/out events over all online CPUs */
+			for_each_online_cpu(cpu) {
+				struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
+
+				SwapinCount += this->event[PSWPIN];
+				SwapoutCount += this->event[PSWPOUT];
+			}
+
+			/* More than 3000 swapped pages per second is treated as thrashing */
+			if (((SwapinCount - history[0] + SwapoutCount - history[1]) / (jiffies - t) * HZ) > 3000) {
+				bThrashing = true;
+				/* xlog_printk(ANDROID_LOG_ERROR, LOGTAG, "!!! thrashing !!!\n"); */
+			} else {
+				bThrashing = false;
+				/* xlog_printk(ANDROID_LOG_WARN, LOGTAG, "!!! NO thrashing !!!\n"); */
+			}
+			history[0] = SwapinCount;
+			history[1] = SwapoutCount;
+
+			t = jiffies;
+		}
+
+		if (!bThrashing) {
+			anon_prio = (vmscan_swappiness(sc) * anon) / (anon + file + 1);
+			file_prio = (vmscan_swap_sum - vmscan_swappiness(sc)) * file / (anon + file + 1);
+			/* xlog_printk(ANDROID_LOG_DEBUG, LOGTAG, "1 anon_prio: %d, file_prio: %d\n", anon_prio, file_prio); */
+		} else {
+			cached = global_page_state(NR_FILE_PAGES) -
+				 global_page_state(NR_SHMEM) -
+				 total_swapcache_pages();
+			if (cached > lowmem_minfree[2]) {
+				anon_prio = vmscan_swappiness(sc);
+				file_prio = vmscan_swap_sum - vmscan_swappiness(sc);
+				/* xlog_printk(ANDROID_LOG_ERROR, LOGTAG, "2 anon_prio: %d, file_prio: %d\n", anon_prio, file_prio); */
+			} else {
+				anon_prio = (vmscan_swappiness(sc) * anon) / (anon + file + 1);
+				file_prio = (vmscan_swap_sum - vmscan_swappiness(sc)) * file / (anon + file + 1);
+				/* xlog_printk(ANDROID_LOG_ERROR, LOGTAG, "3 anon_prio: %d, file_prio: %d\n", anon_prio, file_prio); */
+			}
+		}
+	} else {
+		anon_prio = vmscan_swappiness(sc);
+		file_prio = vmscan_swap_sum - vmscan_swappiness(sc);
+	}
+#elif defined(CONFIG_ZRAM)
+	if (vmscan_swap_file_ratio) {
+		anon_prio = anon_prio * anon / (anon + file + 1);
+		file_prio = file_prio * file / (anon + file + 1);
+	}
+#endif // CONFIG_ZRAM
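+
+	/*
+	 * Illustrative example (numbers assumed): with swappiness = 60,
+	 * vmscan_swap_sum = 200, anon = 300 pages and file = 100 pages, the
+	 * non-thrashing branch above gives anon_prio = 60 * 300 / 401 = 44 and
+	 * file_prio = 140 * 100 / 401 = 34, i.e. reclaim pressure is weighted
+	 * by the relative size of each LRU instead of by swappiness alone.
+	 */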
+
/*
* OK, so we have swap space and a fair amount of page cache
* pages. We use the recently rotated / recently scanned
if (zone->all_unreclaimable &&
sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
- if (IS_ENABLED(CONFIG_COMPACTION)) {
+ if (IS_ENABLED(CONFIG_COMPACTION) && !sc->hibernation_mode) {
/*
* If we already have plenty of memory free for
* compaction in this zone, don't free any more.
unsigned long writeback_threshold;
bool aborted_reclaim;
+#ifdef CONFIG_FREEZER
+ if (unlikely(pm_freezing && !sc->hibernation_mode))
+ return 0;
+#endif
+
delayacct_freepages_start();
if (global_reclaim(sc))
if (!populated_zone(zone))
return;
+#ifdef CONFIG_FREEZER
+ if (pm_freezing)
+ return;
+#endif
+
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
return;
pgdat = zone->zone_pgdat;
* LRU order by reclaiming preferentially
* inactive > active > active referenced > active mapped
*/
-unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+unsigned long shrink_memory_mask(unsigned long nr_to_reclaim, gfp_t mask)
{
struct reclaim_state reclaim_state;
struct scan_control sc = {
- .gfp_mask = GFP_HIGHUSER_MOVABLE,
+ .gfp_mask = mask,
.may_swap = 1,
.may_unmap = 1,
.may_writepage = 1,
return nr_reclaimed;
}
+EXPORT_SYMBOL_GPL(shrink_memory_mask);
+
+#ifdef CONFIG_MTKPASR
+extern void shrink_mtkpasr_all(void);
+#else
+#define shrink_mtkpasr_all() do {} while (0)
+#endif
+unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+{
+ shrink_mtkpasr_all();
+ return shrink_memory_mask(nr_to_reclaim, GFP_HIGHUSER_MOVABLE);
+}
+EXPORT_SYMBOL_GPL(shrink_all_memory);
#endif /* CONFIG_HIBERNATION */
/* It's optimal to keep kswapds on the same CPUs as their memory, but
device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
#endif
+
+#ifdef CONFIG_MTKPASR
+void try_to_shrink_slab(void)
+{
+ struct shrinker *shrinker;
+ struct shrink_control shrink = {
+ .gfp_mask = GFP_KERNEL|__GFP_HIGHMEM,
+ };
+
+ if (!down_read_trylock(&shrinker_rwsem)) {
+ return;
+ }
+
+ list_for_each_entry(shrinker, &shrinker_list, list) {
+ int num_objs;
+ int shrink_ret = 0;
+ int retry = 2;
+
+ num_objs = do_shrinker_shrink(shrinker, &shrink, 0);
+ if (num_objs <= 0)
+ continue;
+
+ do {
+ /* To shrink */
+ shrink_ret = do_shrinker_shrink(shrinker, &shrink, num_objs);
+ if (shrink_ret == -1)
+ break;
+ /* Check empty */
+ num_objs = do_shrinker_shrink(shrinker, &shrink, 0);
+ if (num_objs <= 0)
+ break;
+ } while (--retry);
+ }
+
+ up_read(&shrinker_rwsem);
+}
+
+extern void free_hot_cold_page(struct page *page, int cold);
+/* Isolate pages for PASR */
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+int mtkpasr_isolate_page(struct page *page, int check_swap)
+#else
+int mtkpasr_isolate_page(struct page *page)
+#endif
+{
+ struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
+ unsigned long flags;
+ isolate_mode_t mode = ISOLATE_ASYNC_MIGRATE;
+
+ /* Lock this zone - USE trylock version! */
+ if (!spin_trylock_irqsave(&zone->lru_lock, flags)) {
+		printk(KERN_ALERT "[%s][%d] Failed to lock this zone!\n",
+		       __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+ /* Check whether we should handle SwapBacked, SwapCache pages */
+ if (check_swap) {
+ if (PageSwapBacked(page) || PageSwapCache(page)) {
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ return -EACCES;
+ }
+ }
+#endif
+
+ /* Try to isolate this page */
+ if (__isolate_lru_page(page, mode) != 0) {
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ return -EACCES;
+ }
+
+ /* Successfully isolated */
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+ del_page_from_lru_list(page, lruvec, page_lru(page));
+
+ /* Unlock this zone */
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+
+ return 0;
+}
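+
+/*
+ * Usage sketch (assumed caller, outside this file): a PASR driver would call
+ * mtkpasr_isolate_page() on a candidate page and, on success, follow up with
+ * mtkpasr_drop_page() below; as its header comment notes, on error the caller
+ * is responsible for putting the page back on an LRU (or migrating it).
+ */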
+
+/* Drop page (in File/Anon LRUs) (Imitate the behavior of shrink_page_list) */
+/* If returns error, caller needs to putback page by itself. */
+int mtkpasr_drop_page(struct page *page)
+{
+ int ret;
+ unsigned long vm_flags = 0x0;
+ bool active = false;
+ struct address_space *mapping;
+ enum ttu_flags unmap_flags = TTU_UNMAP;
+
+ /* Suitable scan control */
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .order = PAGE_ALLOC_COSTLY_ORDER + 1,
+		/* .reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_SYNC, */
+		/* We only handle "SwapBacked" pages in this reclaim mode! */
+ };
+
+ /* Try to isolate this page */
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+ ret = mtkpasr_isolate_page(page, 0x1);
+#else
+ ret = mtkpasr_isolate_page(page);
+#endif
+ if (ret) {
+ return ret;
+ }
+
+ /* Check whether it is evictable! */
+ if (unlikely(!page_evictable(page))) {
+ putback_lru_page(page);
+ return -EACCES;
+ }
+
+ /* If it is Active, reference and deactivate it */
+ if (PageActive(page)) {
+ active = TestClearPageActive(page);
+ }
+
+ /* If we fail to lock this page, ignore it */
+ if (!trylock_page(page)) {
+ goto putback;
+ }
+
+ /* If page is in writeback, we don't handle it here! */
+ if (PageWriteback(page)) {
+ goto unlock;
+ }
+
+ /*
+ * Anonymous process memory has backing store?
+ * Try to allocate it some swap space here.
+ */
+ if (PageAnon(page) && !PageSwapCache(page)) {
+		/* Don't bother if swap space is nearly full */
+ if (vm_swap_full()) {
+ goto unlock;
+ }
+
+ /* Ok! It is safe to add this page to swap. */
+ if (!add_to_swap(page, NULL)){
+ goto unlock;
+ }
+ }
+
+ /* We don't handle dirty file cache here (Related devices may be suspended) */
+ if (page_is_file_cache(page)) {
+ /* How do we handle pages in VM_EXEC vmas? */
+ if ((vm_flags & VM_EXEC)) {
+ goto unlock;
+ }
+ /* We don't handle dirty file pages! */
+ if (PageDirty(page)) {
+#ifdef CONFIG_MTKPASR_DEBUG
+			printk(KERN_ALERT "[%s][%d] Dirty file page, skip it\n",
+			       __func__, __LINE__);
+#endif
+ goto unlock;
+ }
+ }
+
+ /*
+ * The page is mapped into the page tables of one or more
+ * processes. Try to unmap it here.
+ */
+ mapping = page_mapping(page);
+ if (page_mapped(page) && mapping) {
+#if 0
+ /* Indicate unmap action for SwapBacked pages */
+ if (PageSwapBacked(page)) {
+ unmap_flags |= TTU_IGNORE_ACCESS;
+ }
+#endif
+ /* To unmap */
+ switch (try_to_unmap(page, unmap_flags)) {
+ case SWAP_SUCCESS:
+ /* try to free the page below */
+ break;
+ case SWAP_FAIL:
+ goto restore_swap;
+ case SWAP_AGAIN:
+ goto restore_swap;
+ case SWAP_MLOCK:
+ goto restore_swap;
+
+ }
+ }
+
+	/*
+	 * Check whether it is dirtied. We have filtered out dirty file pages
+	 * above (IMPORTANT!), so a dirty page here must be swap backed:
+	 * VM_BUG_ON(!PageSwapBacked(page)).
+	 */
+ if (PageDirty(page)) {
+ /* Page is dirty, try to write it out here */
+ /* It's ok for zram swap! */
+ /* Should we need to apply GFP_IOFS? */
+ switch (pageout(page, mapping, &sc)) {
+ case PAGE_SUCCESS:
+ if (PageWriteback(page)) {
+ goto putback;
+ }
+ if (PageDirty(page)) {
+ goto putback;
+ }
+
+ /*
+ * A synchronous write - probably a ramdisk. Go
+ * ahead and try to reclaim the page.
+ */
+ if (!trylock_page(page)) {
+ goto putback;
+ }
+ if (PageDirty(page) || PageWriteback(page)) {
+ goto unlock;
+ }
+			mapping = page_mapping(page);
+			/* fall through */
+		case PAGE_CLEAN:
+ /* try to free the page below */
+ break;
+ default:
+#ifdef CONFIG_MTKPASR_DEBUG
+			/* printk(KERN_ALERT "[%s][%d]\n", __func__, __LINE__); */
+#endif
+ goto restore_unmap;
+ }
+ }
+
+ /* Release buffer */
+ if (page_has_private(page)) {
+ if (!try_to_release_page(page, sc.gfp_mask)) {
+ goto unlock;
+ }
+ if (!mapping && page_count(page) == 1) {
+ unlock_page(page);
+ if (put_page_testzero(page)) {
+ goto freeit;
+ } else {
+ /* Race! TOCHECK */
+				printk(KERN_ALERT "[%s][%d] RACE!!\n",
+				       __func__, __LINE__);
+ goto notask;
+ }
+ }
+ }
+ if (!mapping || !__remove_mapping(mapping, page)) {
+ goto unlock;
+ }
+
+ __clear_page_locked(page);
+
+freeit:
+ free_hot_cold_page(page, 0);
+ return 0;
+
+restore_unmap:
+	/* TODO: undo the unmap if needed; fall through to restore swap state */
+
+restore_swap:
+ if (PageSwapCache(page))
+ try_to_free_swap(page);
+
+unlock:
+ unlock_page(page);
+
+putback:
+ /* Activate it again if needed! */
+ if (active)
+ SetPageActive(page);
+
+	/*
+	 * We don't put pages back on their LRU lists here because the caller
+	 * wants to do more work outside this function:
+	 * putback_lru_page(page);
+	 */
+
+	/* Failed to drop the page; let the caller migrate it! */
+ return -EBUSY;
+
+notask:
+ return 0;
+}
+#endif