memcg: fix gfp_mask of callers of charge
author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
	Thu, 8 Jan 2009 02:07:49 +0000 (18:07 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 8 Jan 2009 16:31:04 +0000 (08:31 -0800)
Fix misuse of GFP_KERNEL.

Currently, most callers of the mem_cgroup_charge_xxx() functions use GFP_KERNEL.

I think this stems from the fact that page_cgroup *was* dynamically
allocated.

But now we allocate all page_cgroup structures at boot, and
try_to_free_mem_cgroup_pages() reclaims memory using GFP_HIGHUSER_MOVABLE
combined with the GFP_RECLAIM_MASK bits of the gfp mask the caller passed in.

  * This is because we just want to reduce memory usage;
    "where should we reclaim from?" is not a question memcg has to answer.
    (A rough sketch of how the masks are combined follows below.)

This patch changes the gfp masks to GFP_HIGHUSER_MOVABLE where possible.

Note: This patch does not change behaviour; it is about showing sane
      information in the source code.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c
mm/memory.c
mm/shmem.c
mm/swapfile.c

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f568b19645517b77fb054b20fdbfc952ad7bd628..c34eb52bdc3fe22ad571206cc4e7ddcacb8ec700 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -808,8 +808,9 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
        }
        unlock_page_cgroup(pc);
        if (mem) {
-               ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
-                       ctype, mem);
+               ret = mem_cgroup_charge_common(newpage, NULL,
+                                       GFP_HIGHUSER_MOVABLE,
+                                       ctype, mem);
                css_put(&mem->css);
        }
        return ret;
@@ -889,7 +890,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                        ret = -EBUSY;
                        break;
                }
-               progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
+               progress = try_to_free_mem_cgroup_pages(memcg,
+                               GFP_HIGHUSER_MOVABLE);
                if (!progress)
                        retry_count--;
        }
diff --git a/mm/memory.c b/mm/memory.c
index 7f210f1609905d770097969d68d3284997c89390..ba5189e322e62daee42240ec323600a1801778d6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2000,7 +2000,7 @@ gotten:
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
-       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
                goto oom_free_new;
 
        /*
@@ -2431,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) {
+       if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
                ret = VM_FAULT_OOM;
                unlock_page(page);
                goto out;
@@ -2512,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom;
        __SetPageUptodate(page);
 
-       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
@@ -2603,7 +2603,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
-                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+                       if (mem_cgroup_newpage_charge(page,
+                                               mm, GFP_HIGHUSER_MOVABLE)) {
                                ret = VM_FAULT_OOM;
                                page_cache_release(page);
                                goto out;
diff --git a/mm/shmem.c b/mm/shmem.c
index 5941f980136367c8e0bc28c39b80f2acadee54c2..bd9b4ea307b2f57740dad25b9b5a0ddfce2cd1fb 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -928,8 +928,8 @@ found:
        error = 1;
        if (!inode)
                goto out;
-       /* Precharge page using GFP_KERNEL while we can wait */
-       error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+       /* Charge page using GFP_HIGHUSER_MOVABLE while we can wait */
+       error = mem_cgroup_cache_charge(page, current->mm, GFP_HIGHUSER_MOVABLE);
        if (error)
                goto out;
        error = radix_tree_preload(GFP_KERNEL);
@@ -1379,7 +1379,7 @@ repeat:
 
                        /* Precharge page while we can wait, compensate after */
                        error = mem_cgroup_cache_charge(filepage, current->mm,
-                                                       gfp & ~__GFP_HIGHMEM);
+                                       GFP_HIGHUSER_MOVABLE);
                        if (error) {
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fb926efb51672e955204b72b0e5c1435660bcf15..ddc6d92be2cb43166acea2456be2a962d6070fa0 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -695,7 +695,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        pte_t *pte;
        int ret = 1;
 
-       if (mem_cgroup_try_charge(vma->vm_mm, GFP_KERNEL, &ptr))
+       if (mem_cgroup_try_charge(vma->vm_mm, GFP_HIGHUSER_MOVABLE, &ptr))
                ret = -ENOMEM;
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);