mm: per-process reclaim
authorMinchan Kim <minchan@kernel.org>
Fri, 9 Jan 2015 13:06:55 +0000 (18:36 +0530)
committerPDO SCM Team <hudsoncm@motorola.com>
Fri, 15 Nov 2019 06:58:49 +0000 (00:58 -0600)
These days, there are many platforms available in the embedded market,
and they are smarter than the kernel, which has only very limited
information about the working set. As such, they want to be involved in
memory management more heavily, as seen in Android's lowmemory killer
and ashmem, or the many recent lowmemory notifiers.

One simple scenario that illustrates userspace's intelligence is that
the platform can manage tasks as foreground and background, so it is
better to reclaim a background task's pages for the sake of end-user
*responsiveness*, even though that task has frequently referenced pages.

This patch adds new knob "reclaim under proc/<pid>/" so task manager
can reclaim any target process anytime, anywhere. It could give another
method to platform for using memory efficiently.

It can avoid killing a process just to get free memory, which was a
really terrible experience: I once lost my best-ever game score because
I switched to a phone call while I was enjoying the game.

Reclaim file-backed pages only.
echo file > /proc/PID/reclaim
Reclaim anonymous pages only.
echo anon > /proc/PID/reclaim
Reclaim all pages
echo all > /proc/PID/reclaim

Mot-CRs-fixed: (CR)

Bug: 122047783
Change-Id: I2f629f7a43289af114df27044b1d2af4a6e785bc
Signed-off-by: Tim Murray <timmurray@google.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-on: https://gerrit.mot.com/1453727
Tested-by: Jira Key
SLTApproved: Slta Waiver
SME-Granted: SME Approvals Granted
Reviewed-by: Xiangpo Zhao <zhaoxp3@motorola.com>
Submit-Approved: Jira Key

fs/proc/base.c
fs/proc/internal.h
fs/proc/task_mmu.c
include/linux/rmap.h
mm/Kconfig
mm/vmscan.c

index 3b593d1bafaf0bcba433ebd69d72f79811e9b551..00653689a83de2c319e70ad3b73aba0258e9f65f 100644 (file)
@@ -2973,6 +2973,9 @@ static const struct pid_entry tgid_base_stuff[] = {
        REG("mounts",     S_IRUGO, proc_mounts_operations),
        REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
        REG("mountstats", S_IRUSR, proc_mountstats_operations),
+#ifdef CONFIG_PROCESS_RECLAIM
+       REG("reclaim",    S_IWUGO, proc_reclaim_operations),
+#endif
 #ifdef CONFIG_PROC_PAGE_MONITOR
        REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
        REG("smaps",      S_IRUGO, proc_pid_smaps_operations),
index 02e0d9e9b3fed2e338492515f034a20c76606ebe..6df8e2a33a2f0c948f69ba4a905c8f96648e44f6 100644 (file)
@@ -199,6 +199,7 @@ struct pde_opener {
 extern const struct inode_operations proc_link_inode_operations;
 
 extern const struct inode_operations proc_pid_link_inode_operations;
+extern const struct file_operations proc_reclaim_operations;
 
 extern void proc_init_inodecache(void);
 void set_proc_pid_nlink(void);
index ac1c617440bca6b8d13e697b31af19a412f14842..f0c7688f5ba4732d97756c47cdc63fc4f6e440ef 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
 #include <linux/uaccess.h>
+#include <linux/mm_inline.h>
 
 #include <asm/elf.h>
 #include <asm/tlb.h>
@@ -1651,6 +1652,144 @@ const struct file_operations proc_pagemap_operations = {
 };
 #endif /* CONFIG_PROC_PAGE_MONITOR */
 
+#ifdef CONFIG_PROCESS_RECLAIM
+enum reclaim_type {
+       RECLAIM_FILE,
+       RECLAIM_ANON,
+       RECLAIM_ALL,
+};
+
+static int reclaim_pmd_range(pmd_t *pmd, unsigned long addr,
+                               unsigned long end, struct mm_walk *walk)
+{
+       pte_t *orig_pte, *pte, ptent;
+       spinlock_t *ptl;
+       LIST_HEAD(page_list);
+       struct page *page;
+       int isolated = 0;
+       struct vm_area_struct *vma = walk->vma;
+
+       orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (pte = orig_pte; addr < end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+               if (!pte_present(ptent))
+                       continue;
+
+               page = vm_normal_page(vma, addr, ptent);
+               if (!page)
+                       continue;
+               /*
+                * XXX: we don't handle compound page at this moment but
+                * it should revisit for THP page before upstream.
+                */
+               if (PageCompound(page)) {
+                       unsigned int order = compound_order(page);
+                       unsigned int nr_pages = (1 << order) - 1;
+
+                       addr += (nr_pages * PAGE_SIZE);
+                       pte += nr_pages;
+                       continue;
+               }
+
+               if (!PageLRU(page))
+                       continue;
+
+               if (page_mapcount(page) > 1)
+                       continue;
+
+               if (isolate_lru_page(page))
+                       continue;
+
+               isolated++;
+               list_add(&page->lru, &page_list);
+               if (isolated >= SWAP_CLUSTER_MAX) {
+                       pte_unmap_unlock(orig_pte, ptl);
+                       reclaim_pages(&page_list);
+                       isolated = 0;
+                       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+                       orig_pte = pte;
+               }
+       }
+
+       pte_unmap_unlock(orig_pte, ptl);
+       reclaim_pages(&page_list);
+
+       cond_resched();
+       return 0;
+}
+
+static ssize_t reclaim_write(struct file *file, const char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       struct task_struct *task;
+       char buffer[PROC_NUMBUF];
+       struct mm_struct *mm;
+       struct vm_area_struct *vma;
+       enum reclaim_type type;
+       char *type_buf;
+
+       if (!capable(CAP_SYS_NICE))
+               return -EPERM;
+
+       memset(buffer, 0, sizeof(buffer));
+       if (count > sizeof(buffer) - 1)
+               count = sizeof(buffer) - 1;
+
+       if (copy_from_user(buffer, buf, count))
+               return -EFAULT;
+
+       type_buf = strstrip(buffer);
+       if (!strcmp(type_buf, "file"))
+               type = RECLAIM_FILE;
+       else if (!strcmp(type_buf, "anon"))
+               type = RECLAIM_ANON;
+       else if (!strcmp(type_buf, "all"))
+               type = RECLAIM_ALL;
+       else
+               return -EINVAL;
+
+       task = get_proc_task(file->f_path.dentry->d_inode);
+       if (!task)
+               return -ESRCH;
+
+       mm = get_task_mm(task);
+       if (mm) {
+               struct mm_walk reclaim_walk = {
+                       .pmd_entry = reclaim_pmd_range,
+                       .mm = mm,
+               };
+
+               down_read(&mm->mmap_sem);
+               for (vma = mm->mmap; vma; vma = vma->vm_next) {
+                       if (is_vm_hugetlb_page(vma))
+                               continue;
+
+                       if (vma->vm_flags & VM_LOCKED)
+                               continue;
+
+                       if (type == RECLAIM_ANON && !vma_is_anonymous(vma))
+                               continue;
+                       if (type == RECLAIM_FILE && vma_is_anonymous(vma))
+                               continue;
+
+                       walk_page_range(vma->vm_start, vma->vm_end,
+                                       &reclaim_walk);
+               }
+               flush_tlb_mm(mm);
+               up_read(&mm->mmap_sem);
+               mmput(mm);
+       }
+       put_task_struct(task);
+
+       return count;
+}
+
+const struct file_operations proc_reclaim_operations = {
+       .write          = reclaim_write,
+       .llseek         = noop_llseek,
+};
+#endif
+
 #ifdef CONFIG_NUMA
 
 struct numa_maps {
index 988d176472df75342b307cb40b809049f0ed64cb..bf23fd2147a496f9186f5da1da2be8ec5dda5e72 100644 (file)
 #include <linux/memcontrol.h>
 #include <linux/highmem.h>
 
+extern int isolate_lru_page(struct page *page);
+extern void putback_lru_page(struct page *page);
+extern unsigned long reclaim_pages(struct list_head *page_list);
+
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
  * an anonymous page pointing to this anon_vma needs to be unmapped:
index 62034754105d6a2a1a25107016294766b4d6f52f..92e3f35116397c1964532773b5f7c24987e33332 100644 (file)
@@ -244,6 +244,18 @@ config COMPACTION
           it and then we would be really interested to hear about that at
           linux-mm@kvack.org.
 
+config PROCESS_RECLAIM
+       bool "Enable process reclaim"
+       depends on PROC_FS
+       default n
+       help
+        It allows to reclaim pages of the process by /proc/pid/reclaim.
+
+        (echo file > /proc/PID/reclaim) reclaims file-backed pages only.
+        (echo anon > /proc/PID/reclaim) reclaims anonymous pages only.
+        (echo all > /proc/PID/reclaim) reclaims all pages.
+
+        Any other value is ignored.
 #
 # support for page migration
 #
index 0b92919df47fcee334065ad744d738dfd58196ed..dfecd5f77fd4659e5cc4c34007d03729f6574a7e 100644 (file)
@@ -963,7 +963,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                      struct scan_control *sc,
                                      enum ttu_flags ttu_flags,
                                      struct reclaim_stat *stat,
-                                     bool force_reclaim)
+                                     bool skip_reference_check)
 {
        LIST_HEAD(ret_pages);
        LIST_HEAD(free_pages);
@@ -983,7 +983,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                struct address_space *mapping;
                struct page *page;
                int may_enter_fs;
-               enum page_references references = PAGEREF_RECLAIM_CLEAN;
+               enum page_references references = PAGEREF_RECLAIM;
                bool dirty, writeback;
 
                cond_resched();
@@ -1115,7 +1115,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        }
                }
 
-               if (!force_reclaim)
+               if (!skip_reference_check)
                        references = page_check_references(page, sc);
 
                switch (references) {
@@ -1398,6 +1398,56 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
        return ret;
 }
 
+#ifdef CONFIG_PROCESS_RECLAIM
+unsigned long reclaim_pages(struct list_head *page_list)
+{
+       struct reclaim_stat dummy;
+       unsigned long nr_reclaimed;
+       struct page *page;
+       unsigned long nr_isolated[2] = {0, };
+       struct pglist_data *pgdat = NULL;
+       struct scan_control sc = {
+               .gfp_mask = GFP_KERNEL,
+               .priority = DEF_PRIORITY,
+               .may_writepage = 1,
+               .may_unmap = 1,
+               .may_swap = 1,
+       };
+
+       if (list_empty(page_list))
+               return 0;
+
+       list_for_each_entry(page, page_list, lru) {
+               ClearPageActive(page);
+               if (pgdat == NULL)
+                       pgdat = page_pgdat(page);
+               /* XXX: It could be multiple node in other config */
+               WARN_ON_ONCE(pgdat != page_pgdat(page));
+               if (!page_is_file_cache(page))
+                       nr_isolated[0]++;
+               else
+                       nr_isolated[1]++;
+       }
+
+       mod_node_page_state(pgdat, NR_ISOLATED_ANON, nr_isolated[0]);
+       mod_node_page_state(pgdat, NR_ISOLATED_FILE, nr_isolated[1]);
+
+       nr_reclaimed = shrink_page_list(page_list, pgdat, &sc,
+                       TTU_IGNORE_ACCESS, &dummy, true);
+
+       while (!list_empty(page_list)) {
+               page = lru_to_page(page_list);
+               list_del(&page->lru);
+               putback_lru_page(page);
+       }
+
+       mod_node_page_state(pgdat, NR_ISOLATED_ANON, -nr_isolated[0]);
+       mod_node_page_state(pgdat, NR_ISOLATED_FILE, -nr_isolated[1]);
+
+       return nr_reclaimed;
+}
+#endif
+
 /*
  * Attempt to remove the specified page from its LRU.  Only take this page
  * if it is of the appropriate PageActive status.  Pages which are being