#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
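
/*
 * page_cgroup keeps the per-page state used by the memory cgroup
 * controller out of struct page.  A struct page_cgroup exists for every
 * physical page frame; the arrays holding them are allocated per node
 * (flatmem) or per memory section (sparsemem) below.  The second half
 * of this file implements swap_cgroup, which records the mem_cgroup id
 * owning each swap entry so charges survive swap-out.
 */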

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        pgdat->node_page_cgroup = NULL;
}

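/*
 * With flatmem, every node carries one flat page_cgroup array spanning
 * its whole pfn range, so a lookup is just an index into the array of
 * the page's node, offset from node_start_pfn.
 */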
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long offset;
        struct page_cgroup *base;

        base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_cgroup arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */
        if (unlikely(!base))
                return NULL;
#endif
        offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
        return base + offset;
}

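/*
 * Allocate one page_cgroup table covering the whole node from bootmem.
 * Rough cost, assuming sizeof(struct page_cgroup) is 16 bytes on 64-bit
 * (flags plus a mem_cgroup pointer; the real size is config-dependent):
 * a node with 4GB of 4KB pages has ~1M page frames, i.e. ~16MB of
 * page_cgroup allocated up front.
 */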
static int __init alloc_node_page_cgroup(int nid)
{
        struct page_cgroup *base;
        unsigned long table_size;
        unsigned long nr_pages;

        nr_pages = NODE_DATA(nid)->node_spanned_pages;
        if (!nr_pages)
                return 0;

        table_size = sizeof(struct page_cgroup) * nr_pages;

        base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        if (!base)
                return -ENOMEM;
        NODE_DATA(nid)->node_page_cgroup = base;
        total_usage += table_size;
        return 0;
}

void __init page_cgroup_init_flatmem(void)
{
        int nid, fail;

        if (mem_cgroup_disabled())
                return;

        for_each_online_node(nid) {
                fail = alloc_node_page_cgroup(nid);
                if (fail)
                        goto fail;
        }
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
               " don't want memory cgroups\n");
        return;
fail:
        printk(KERN_CRIT "allocation of page_cgroup failed.\n");
        printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
        panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

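/*
 * With sparsemem the table is per memory section.  Note the stored
 * pointer is pre-biased: init_section_page_cgroup() saves (base - pfn),
 * so adding the raw pfn here lands on the right element without first
 * subtracting the section's start pfn.
 */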
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_cgroup arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */
        if (!section->page_cgroup)
                return NULL;
#endif
        return section->page_cgroup + pfn;
}

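/*
 * Per-section tables are allocated at boot and on memory hotplug.  Try
 * a physically contiguous, node-local allocation first and fall back to
 * vmalloc when contiguous pages of table_size are not available.
 */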
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
        gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
        void *addr = NULL;

        addr = alloc_pages_exact_nid(nid, size, flags);
        if (addr) {
                kmemleak_alloc(addr, size, 1, flags);
                return addr;
        }

        if (node_state(nid, N_HIGH_MEMORY))
                addr = vzalloc_node(size, nid);
        else
                addr = vzalloc(size);

        return addr;
}

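/*
 * Set up the page_cgroup table for the section containing "pfn".  The
 * pre-biased pointer trick, worked through: for a section covering pfns
 * [s, s + PAGES_PER_SECTION), we store (base - s), so that for any pfn
 * in the section:
 *
 *      section->page_cgroup + pfn == base + (pfn - s)
 *
 * which is exactly &base[pfn - s], the entry for that pfn.
 */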
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
        struct mem_section *section;
        struct page_cgroup *base;
        unsigned long table_size;

        section = __pfn_to_section(pfn);

        if (section->page_cgroup)
                return 0;

        table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
        base = alloc_page_cgroup(table_size, nid);

        /*
         * The value stored in section->page_cgroup is (base - pfn)
         * and it does not point to the memory block allocated above,
         * causing kmemleak false positives.
         */
        kmemleak_not_leak(base);

        if (!base) {
                printk(KERN_ERR "page cgroup allocation failure\n");
                return -ENOMEM;
        }

        /*
         * The passed "pfn" may not be aligned to SECTION.  For the
         * calculation we need to apply a mask.
         */
        pfn &= PAGE_SECTION_MASK;
        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_cgroup(void *addr)
{
        if (is_vmalloc_addr(addr)) {
                vfree(addr);
        } else {
                struct page *page = virt_to_page(addr);
                size_t table_size =
                        sizeof(struct page_cgroup) * PAGES_PER_SECTION;

                BUG_ON(PageReserved(page));
                free_pages_exact(addr, table_size);
        }
}

void __free_page_cgroup(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_cgroup *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_cgroup)
                return;
        base = ms->page_cgroup + pfn;
        free_page_cgroup(base);
        ms->page_cgroup = NULL;
}

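/*
 * Called when a pfn range goes online: round it out to whole sections
 * and give each present section in it a page_cgroup table.  On failure,
 * every table in the rounded range is freed again before returning.
 */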
int __meminit online_page_cgroup(unsigned long start_pfn,
                        unsigned long nr_pages,
                        int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;

        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        if (nid == -1) {
                /*
                 * In this case, "nid" already exists and contains valid
                 * memory.  "start_pfn" passed to us is a pfn which is an
                 * arg for online_pages(), and start_pfn should exist.
                 */
                nid = pfn_to_nid(start_pfn);
                VM_BUG_ON(!node_state(nid, N_ONLINE));
        }

        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn, nid);
        }
        if (!fail)
                return 0;

        /* rollback */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);

        return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
                        unsigned long nr_pages, int nid)
{
        unsigned long start, end, pfn;

        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);
        return 0;
}

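/*
 * Memory-hotplug notifier: tables are allocated at MEM_GOING_ONLINE,
 * i.e. before the new pages become visible to the allocator, and are
 * freed only at MEM_OFFLINE, once no page in the range can be in use.
 * The cancel and completion events need no action here.
 */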
static int __meminit page_cgroup_callback(struct notifier_block *self,
                               unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                ret = online_page_cgroup(mn->start_pfn,
                                   mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_OFFLINE:
                offline_page_cgroup(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_CANCEL_ONLINE:
        case MEM_GOING_OFFLINE:
                break;
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }

        return notifier_from_errno(ret);
}

#endif

void __init page_cgroup_init(void)
{
        unsigned long pfn;
        int nid;

        if (mem_cgroup_disabled())
                return;

        for_each_node_state(nid, N_HIGH_MEMORY) {
                unsigned long start_pfn, end_pfn;

                start_pfn = node_start_pfn(nid);
                end_pfn = node_end_pfn(nid);
                /*
                 * start_pfn and end_pfn may not be aligned to SECTION and
                 * the page->flags of out of node pages are not initialized.
                 * So we scan [start_pfn, the biggest section's pfn < end_pfn)
                 * here.
                 */
                for (pfn = start_pfn;
                     pfn < end_pfn;
                     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

                        if (!pfn_valid(pfn))
                                continue;
                        /*
                         * Nodes' pfns can be overlapping.
                         * We know some archs can have a node layout such as
                         * -------------pfn-------------->
                         * N0 | N1 | N2 | N0 | N1 | N2 | ....
                         */
                        if (pfn_to_nid(pfn) != nid)
                                continue;
                        if (init_section_page_cgroup(pfn, nid))
                                goto oom;
                }
        }
        hotplug_memory_notifier(page_cgroup_callback, 0);
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
                         "don't want memory cgroups\n");
        return;
oom:
        printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
        panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

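/*
 * swap_cgroup is a two-level map: one swap_cgroup_ctrl per swap file
 * (indexed by swp_type), each holding an array of pages whose slots are
 * struct swap_cgroup entries indexed by swp_offset.  With 4KB pages and
 * a 2-byte id (the usual case, though both are config-dependent),
 * SC_PER_PAGE is 2048, so one map page covers 2048 swap slots, i.e.
 * 8MB of 4KB swap space.
 */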
static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
        struct page **map;
        unsigned long length;
        spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
        unsigned short id;
};
#define SC_PER_PAGE     (PAGE_SIZE/sizeof(struct swap_cgroup))

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's
 * charge/uncharge against SwapCache.  At swap_free(), it is accessed
 * directly from the swap code.
 *
 * This means,
 *  - we have no race in "exchange" when we're accessed via SwapCache,
 *    because SwapCache (and its swp_entry) is under lock.
 *  - When called via swap_free(), there is no user of this entry and no
 *    race.
 * Then, we don't need a lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
        struct page *page;
        struct swap_cgroup_ctrl *ctrl;
        unsigned long idx, max;

        ctrl = &swap_cgroup_ctrl[type];

        for (idx = 0; idx < ctrl->length; idx++) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;
        }
        return 0;
not_enough_page:
        max = idx;
        for (idx = 0; idx < max; idx++)
                __free_page(ctrl->map[idx]);

        return -ENOMEM;
}

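/*
 * Map a swap entry to its swap_cgroup record: swp_type() picks the
 * ctrl, then swp_offset() is split into a map-page index
 * (offset / SC_PER_PAGE) and a slot within that page
 * (offset % SC_PER_PAGE).
 */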
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
                                        struct swap_cgroup_ctrl **ctrlp)
{
        pgoff_t offset = swp_offset(ent);
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;

        ctrl = &swap_cgroup_ctrl[swp_type(ent)];
        if (ctrlp)
                *ctrlp = ctrl;

        mappage = ctrl->map[offset / SC_PER_PAGE];
        sc = page_address(mappage);
        return sc + offset % SC_PER_PAGE;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
                                unsigned short old, unsigned short new)
{
        struct swap_cgroup_ctrl *ctrl;
        struct swap_cgroup *sc;
        unsigned long flags;
        unsigned short retval;

        sc = lookup_swap_cgroup(ent, &ctrl);

        spin_lock_irqsave(&ctrl->lock, flags);
        retval = sc->id;
        if (retval == old)
                sc->id = new;
        else
                retval = 0;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        return retval;
}

27a7faa0
KH
422/**
423 * swap_cgroup_record - record mem_cgroup for this swp_entry.
424 * @ent: swap entry to be recorded into
dad7557e 425 * @id: mem_cgroup to be recorded
27a7faa0 426 *
a3b2d692
KH
427 * Returns old value at success, 0 at failure.
428 * (Of course, old value can be 0.)
27a7faa0 429 */
a3b2d692 430unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
27a7faa0 431{
27a7faa0 432 struct swap_cgroup_ctrl *ctrl;
27a7faa0 433 struct swap_cgroup *sc;
a3b2d692 434 unsigned short old;
e9e58a4e 435 unsigned long flags;
27a7faa0 436
9fb4b7cc 437 sc = lookup_swap_cgroup(ent, &ctrl);
27a7faa0 438
e9e58a4e
KH
439 spin_lock_irqsave(&ctrl->lock, flags);
440 old = sc->id;
441 sc->id = id;
442 spin_unlock_irqrestore(&ctrl->lock, flags);
27a7faa0
KH
443
444 return old;
445}
446
/**
 * lookup_swap_cgroup_id - lookup the mem_cgroup id tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns CSS ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
        return lookup_swap_cgroup(ent, NULL)->id;
}

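/*
 * Called from the swapon path to size and allocate the map for a swap
 * file: one pointer per map page, DIV_ROUND_UP(max_pages, SC_PER_PAGE)
 * of them, then the pages themselves via swap_cgroup_prepare().  The
 * caller is expected to invoke this before any swap slot of "type" can
 * be handed out; a sketch of that ordering, with the surrounding
 * details (and the error label, which is hypothetical here) elided:
 *
 *      if (swap_cgroup_swapon(type, maxpages))
 *              goto bad_swap;
 *      ...enable the swap area...
 */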
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
        void *array;
        unsigned long array_size;
        unsigned long length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return 0;

        length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
        array_size = length * sizeof(void *);

        array = vzalloc(array_size);
        if (!array)
                goto nomem;

        ctrl = &swap_cgroup_ctrl[type];
        mutex_lock(&swap_cgroup_mutex);
        ctrl->length = length;
        ctrl->map = array;
        spin_lock_init(&ctrl->lock);
        if (swap_cgroup_prepare(type)) {
                /* memory shortage */
                ctrl->map = NULL;
                ctrl->length = 0;
                mutex_unlock(&swap_cgroup_mutex);
                vfree(array);
                goto nomem;
        }
        mutex_unlock(&swap_cgroup_mutex);

        return 0;
nomem:
        printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
        printk(KERN_INFO
                "swap_cgroup can be disabled by swapaccount=0 boot option\n");
        return -ENOMEM;
}

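/*
 * Teardown for swapoff.  The map is detached from the ctrl under
 * swap_cgroup_mutex first, so a concurrent swapon/swapoff sees a
 * consistent ctrl, and only then are the map pages and the pointer
 * array freed.
 */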
void swap_cgroup_swapoff(int type)
{
        struct page **map;
        unsigned long i, length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return;

        mutex_lock(&swap_cgroup_mutex);
        ctrl = &swap_cgroup_ctrl[type];
        map = ctrl->map;
        length = ctrl->length;
        ctrl->map = NULL;
        ctrl->length = 0;
        mutex_unlock(&swap_cgroup_mutex);

        if (map) {
                for (i = 0; i < length; i++) {
                        struct page *page = map[i];
                        if (page)
                                __free_page(page);
                }
                vfree(map);
        }
}

#endif