From: Johannes Weiner
Date: Fri, 13 Jan 2012 01:18:10 +0000 (-0800)
Subject: mm: collect LRU list heads into struct lruvec
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=6290df545814990ca2663baf6e894669132d5f73;p=GitHub%2Fexynos8895%2Fandroid_kernel_samsung_universal8895.git

mm: collect LRU list heads into struct lruvec

Having a unified structure with an LRU list set for both global zones
and per-memcg zones allows the code that deals with LRU lists, and does
not care about the container itself, to stay simple.

Once the per-memcg LRU lists directly link struct pages, the isolation
function and all other list manipulations are shared between the memcg
case and the global LRU case.

Signed-off-by: Johannes Weiner
Reviewed-by: KAMEZAWA Hiroyuki
Reviewed-by: Michal Hocko
Reviewed-by: Kirill A. Shutemov
Cc: Daisuke Nishimura
Cc: Balbir Singh
Cc: Ying Han
Cc: Greg Thelen
Cc: Michel Lespinasse
Cc: Rik van Riel
Cc: Minchan Kim
Cc: Christoph Hellwig
Cc: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
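Not part of the patch itself, but as a minimal sketch of the idea
(userspace-compilable C, with hypothetical *_sketch names standing in
for the kernel types): once both containers embed the same struct
lruvec, a single container-agnostic helper can initialize either one,
which is what the two INIT_LIST_HEAD loops in mm/page_alloc.c and
mm/memcontrol.c in the diff below converge towards. A second sketch
after the diff illustrates the shared list manipulation.

#include <stdio.h>

#define NR_LRU_LISTS 5

/* Simplified stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/* The unified LRU list set this patch introduces. */
struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
};

/* Global case: the zone embeds a lruvec... */
struct zone_sketch {
	struct lruvec lruvec;
};

/* ...and so does the per-memcg per-zone info. */
struct mem_cgroup_per_zone_sketch {
	struct lruvec lruvec;
};

/* One helper serves both containers; it never sees which one it got. */
static void lruvec_init_sketch(struct lruvec *lruvec)
{
	int lru;

	for (lru = 0; lru < NR_LRU_LISTS; lru++)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}

int main(void)
{
	struct zone_sketch zone;
	struct mem_cgroup_per_zone_sketch mz;

	lruvec_init_sketch(&zone.lruvec);
	lruvec_init_sketch(&mz.lruvec);
	printf("zone and memcg lruvecs initialized alike\n");
	return 0;
}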
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8f7d24712dc1..e6a7ffe16d31 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -33,7 +33,7 @@ __add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
 static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	__add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
+	__add_page_to_lru_list(zone, page, l, &zone->lruvec.lists[l]);
 }
 
 static inline void
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ca6ca92418a6..42e544cd4c9f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -159,6 +159,10 @@ static inline int is_unevictable_lru(enum lru_list l)
 	return (l == LRU_UNEVICTABLE);
 }
 
+struct lruvec {
+	struct list_head lists[NR_LRU_LISTS];
+};
+
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
@@ -364,10 +368,8 @@ struct zone {
 	ZONE_PADDING(_pad1_)
 
 	/* Fields commonly accessed by the page reclaim scanner */
-	spinlock_t		lru_lock;
-	struct zone_lru {
-		struct list_head list;
-	} lru[NR_LRU_LISTS];
+	spinlock_t		lru_lock;
+	struct lruvec		lruvec;
 
 	struct zone_reclaim_stat reclaim_stat;
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ad7f36f676ff..6e7f849a1a9e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -134,10 +134,7 @@ struct mem_cgroup_reclaim_iter {
  * per-zone information in memory controller.
  */
 struct mem_cgroup_per_zone {
-	/*
-	 * spin_lock to protect the per cgroup LRU
-	 */
-	struct list_head	lists[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 	unsigned long		count[NR_LRU_LISTS];
 
 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
@@ -1061,7 +1058,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move_tail(&pc->lru, &mz->lists[lru]);
+	list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
@@ -1079,7 +1076,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move(&pc->lru, &mz->lists[lru]);
+	list_move(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
@@ -1109,7 +1106,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
-	list_add(&pc->lru, &mz->lists[lru]);
+	list_add(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 /*
@@ -1307,7 +1304,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 
 	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	src = &mz->lists[lru];
+	src = &mz->lruvec.lists[lru];
 
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
@@ -3738,7 +3735,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 
 	zone = &NODE_DATA(node)->node_zones[zid];
 	mz = mem_cgroup_zoneinfo(memcg, node, zid);
-	list = &mz->lists[lru];
+	list = &mz->lruvec.lists[lru];
 
 	loop = MEM_CGROUP_ZSTAT(mz, lru);
 	/* give some margin against EBUSY etc...*/
@@ -4864,7 +4861,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
 		for_each_lru(l)
-			INIT_LIST_HEAD(&mz->lists[l]);
+			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->mem = memcg;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 794e6715c226..25c248eb7d5f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4288,7 +4288,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 
 		zone_pcp_init(zone);
 		for_each_lru(l)
-			INIT_LIST_HEAD(&zone->lru[l].list);
+			INIT_LIST_HEAD(&zone->lruvec.lists[l]);
 		zone->reclaim_stat.recent_rotated[0] = 0;
 		zone->reclaim_stat.recent_rotated[1] = 0;
 		zone->reclaim_stat.recent_scanned[0] = 0;
diff --git a/mm/swap.c b/mm/swap.c
index 67a09a633a09..76ef79d3857c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -236,7 +236,7 @@ static void pagevec_move_tail_fn(struct page *page, void *arg)
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &zone->lru[lru].list);
+		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_rotate_reclaimable_page(page);
 		(*pgmoved)++;
 	}
@@ -480,7 +480,7 @@ static void lru_deactivate_fn(struct page *page, void *arg)
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		list_move_tail(&page->lru, &zone->lru[lru].list);
+		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_rotate_reclaimable_page(page);
 		__count_vm_event(PGROTATED);
 	}
@@ -654,7 +654,6 @@ void lru_add_page_tail(struct zone* zone,
 	int active;
 	enum lru_list lru;
 	const int file = 0;
-	struct list_head *head;
 
 	VM_BUG_ON(!PageHead(page));
 	VM_BUG_ON(PageCompound(page_tail));
@@ -674,10 +673,10 @@ void lru_add_page_tail(struct zone* zone,
 		}
 		update_page_reclaim_stat(zone, page_tail, file, active);
 		if (likely(PageLRU(page)))
-			head = page->lru.prev;
+			__add_page_to_lru_list(zone, page_tail, lru,
+					       page->lru.prev);
 		else
-			head = &zone->lru[lru].list;
-		__add_page_to_lru_list(zone, page_tail, lru, head);
+			add_page_to_lru_list(zone, page_tail, lru);
 	} else {
 		SetPageUnevictable(page_tail);
 		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 024168cfdcb0..93cdc44a1693 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1250,8 +1250,8 @@ static unsigned long isolate_pages_global(unsigned long nr,
 		lru += LRU_ACTIVE;
 	if (file)
 		lru += LRU_FILE;
-	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
-								mode, file);
+	return isolate_lru_pages(nr, &z->lruvec.lists[lru], dst,
+				 scanned, order, mode, file);
 }
 
 /*
@@ -1630,7 +1630,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		list_move(&page->lru, &zone->lru[lru].list);
+		list_move(&page->lru, &zone->lruvec.lists[lru]);
 		mem_cgroup_add_lru_list(page, lru);
 		pgmoved += hpage_nr_pages(page);
 
@@ -3448,7 +3448,7 @@ retry:
 		enum lru_list l = page_lru_base_type(page);
 
 		__dec_zone_state(zone, NR_UNEVICTABLE);
-		list_move(&page->lru, &zone->lru[l].list);
+		list_move(&page->lru, &zone->lruvec.lists[l]);
 		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
 		__count_vm_event(UNEVICTABLE_PGRESCUED);
@@ -3457,7 +3457,7 @@ retry:
 		 * rotate unevictable list
 		 */
 		SetPageUnevictable(page);
-		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		list_move(&page->lru, &zone->lruvec.lists[LRU_UNEVICTABLE]);
 		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
 		if (page_evictable(page, NULL))
 			goto retry;
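
To illustrate the commit message's second point -- that list
manipulations become shareable once the per-memcg lists link struct
pages directly -- here is the companion sketch promised above (again
hypothetical *_sketch names, reusing the struct list_head and struct
lruvec definitions from the first sketch). A rotate-to-tail helper
written against struct lruvec could serve both the global path (e.g.
pagevec_move_tail_fn() in the diff) and the memcg rotate path, since
each caller simply passes in whichever lruvec the page belongs to:

/* Doubly-linked list move-to-tail, in the spirit of list_move_tail(). */
static void list_move_tail_sketch(struct list_head *entry,
				  struct list_head *head)
{
	/* Unlink the entry from its current list... */
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;

	/* ...and re-insert it just before head, i.e. at the tail. */
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/*
 * Container-agnostic rotation: the caller hands in &zone.lruvec or
 * &mz.lruvec, and this code cannot tell the difference -- which is
 * the end state the series is heading for.
 */
static void lru_rotate_sketch(struct lruvec *lruvec,
			      struct list_head *page_lru, int lru)
{
	list_move_tail_sketch(page_lru, &lruvec->lists[lru]);
}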