From: Hugh Dickins
Date: Tue, 4 Mar 2008 22:29:08 +0000 (-0800)
Subject: memcg: remove mem_cgroup_uncharge
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=8289546e573d5ff681cdf0fc7a1184cca66fdb55;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

memcg: remove mem_cgroup_uncharge

Nothing uses mem_cgroup_uncharge apart from mem_cgroup_uncharge_page
(a trivial wrapper around it) and mem_cgroup_end_migration (which does
the same as mem_cgroup_uncharge_page).  And it often ends up having to
lock just to let its caller unlock.  Remove it (but leave the silly
locking until a later patch).

Moved mem_cgroup_cache_charge next to mem_cgroup_charge in memcontrol.h.

Signed-off-by: Hugh Dickins
Cc: David Rientjes
Acked-by: Balbir Singh
Acked-by: KAMEZAWA Hiroyuki
Cc: Hirokazu Takahashi
Cc: YAMAMOTO Takashi
Cc: Paul Menage
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 70789df7dab4..8b1c4295848b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,7 +35,8 @@ extern void mm_free_cgroup(struct mm_struct *mm);
 extern struct page_cgroup *page_get_page_cgroup(struct page *page);
 extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
-extern void mem_cgroup_uncharge(struct page_cgroup *pc);
+extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+				gfp_t gfp_mask);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_move_lists(struct page *page, bool active);
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -45,8 +46,6 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct mem_cgroup *mem_cont,
 					int active);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
-extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
-					gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
 #define mm_match_cgroup(mm, cgroup)	\
@@ -92,14 +91,16 @@ static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
 	return NULL;
 }
 
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-					gfp_t gfp_mask)
+static inline int mem_cgroup_charge(struct page *page,
+					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_uncharge(struct page_cgroup *pc)
+static inline int mem_cgroup_cache_charge(struct page *page,
+					struct mm_struct *mm, gfp_t gfp_mask)
 {
+	return 0;
 }
 
 static inline void mem_cgroup_uncharge_page(struct page *page)
@@ -110,13 +111,6 @@ static inline void mem_cgroup_move_lists(struct page *page, bool active)
 {
 }
 
-static inline int mem_cgroup_cache_charge(struct page *page,
-					struct mm_struct *mm,
-					gfp_t gfp_mask)
-{
-	return 0;
-}
-
 static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
 {
 	return 1;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 83ba13ad31e1..1333d25163bb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -697,20 +697,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 
 /*
  * Uncharging is always a welcome operation, we never complain, simply
- * uncharge. This routine should be called with lock_page_cgroup held
+ * uncharge.
  */
-void mem_cgroup_uncharge(struct page_cgroup *pc)
+void mem_cgroup_uncharge_page(struct page *page)
 {
+	struct page_cgroup *pc;
 	struct mem_cgroup *mem;
 	struct mem_cgroup_per_zone *mz;
-	struct page *page;
 	unsigned long flags;
 
 	/*
 	 * Check if our page_cgroup is valid
 	 */
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
 	if (!pc)
-		return;
+		goto unlock;
 
 	if (atomic_dec_and_test(&pc->ref_cnt)) {
 		page = pc->page;
@@ -731,12 +733,8 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
 		}
 		lock_page_cgroup(page);
 	}
-}
 
-void mem_cgroup_uncharge_page(struct page *page)
-{
-	lock_page_cgroup(page);
-	mem_cgroup_uncharge(page_get_page_cgroup(page));
+unlock:
 	unlock_page_cgroup(page);
 }
 
@@ -759,12 +757,7 @@ int mem_cgroup_prepare_migration(struct page *page)
 
 void mem_cgroup_end_migration(struct page *page)
 {
-	struct page_cgroup *pc;
-
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	mem_cgroup_uncharge(pc);
-	unlock_page_cgroup(page);
+	mem_cgroup_uncharge_page(page);
 }
 
 /*
  * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
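
For illustration only: a minimal, self-contained user-space model of the locking
pattern the commit message criticizes, where the wrapper takes the page-cgroup
lock just so the callee can run before the wrapper unlocks again. The struct
layout, the pthread mutex, and the old_/new_ function names below are invented
stand-ins for this sketch, not the kernel's memcg API, and the real function's
accounting/LRU body is elided.

/* Stand-alone model; all names here are illustrative stand-ins. */
#include <pthread.h>
#include <stdio.h>

struct page_cgroup { int ref_cnt; };
struct page { struct page_cgroup *pc; pthread_mutex_t cgroup_lock; };

/* Before the patch: the callee assumed the lock was already held... */
static void old_uncharge(struct page_cgroup *pc)
{
	if (!pc)
		return;			/* caller still has to unlock */
	if (--pc->ref_cnt == 0)
		printf("uncharged\n");
}

/* ...so the wrapper locked purely to call it, then unlocked again. */
static void old_uncharge_page(struct page *page)
{
	pthread_mutex_lock(&page->cgroup_lock);
	old_uncharge(page->pc);
	pthread_mutex_unlock(&page->cgroup_lock);
}

/* After the patch: lookup, refcount drop, and both lock operations live
 * in one function, mirroring the shape of mem_cgroup_uncharge_page() in
 * the hunks above (the real accounting work is omitted here). */
static void new_uncharge_page(struct page *page)
{
	struct page_cgroup *pc;

	pthread_mutex_lock(&page->cgroup_lock);
	pc = page->pc;
	if (!pc)
		goto unlock;
	if (--pc->ref_cnt == 0)
		printf("uncharged\n");
unlock:
	pthread_mutex_unlock(&page->cgroup_lock);
}

int main(void)
{
	struct page_cgroup pc1 = { .ref_cnt = 1 }, pc2 = { .ref_cnt = 1 };
	struct page a = { .pc = &pc1 }, b = { .pc = &pc2 };

	pthread_mutex_init(&a.cgroup_lock, NULL);
	pthread_mutex_init(&b.cgroup_lock, NULL);
	old_uncharge_page(&a);		/* old split caller/callee shape */
	new_uncharge_page(&b);		/* merged shape after the patch */
	return 0;
}

Build with "cc -pthread model.c" if you want to run it; the point is only the
structural change, not the (still "silly") locking itself, which the commit
message defers to a later patch.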