/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory. This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

116#ifdef CONFIG_PM_SLEEP
117/*
118 * The following functions are used by the suspend/hibernate code to temporarily
119 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
120 * while devices are suspended. To avoid races with the suspend/hibernate code,
121 * they should always be called with pm_mutex held (gfp_allowed_mask also should
122 * only be modified with pm_mutex held, unless the suspend/hibernate code is
123 * guaranteed not to run in parallel with that modification).
124 */
c9e664f1
RW
125
126static gfp_t saved_gfp_mask;
127
128void pm_restore_gfp_mask(void)
452aa699
RW
129{
130 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
131 if (saved_gfp_mask) {
132 gfp_allowed_mask = saved_gfp_mask;
133 saved_gfp_mask = 0;
134 }
452aa699
RW
135}
136
c9e664f1 137void pm_restrict_gfp_mask(void)
452aa699 138{
452aa699 139 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
140 WARN_ON(saved_gfp_mask);
141 saved_gfp_mask = gfp_allowed_mask;
142 gfp_allowed_mask &= ~GFP_IOFS;
452aa699 143}
f90ac398
MG
144
145bool pm_suspended_storage(void)
146{
147 if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
148 return false;
149 return true;
150}
151#endif /* CONFIG_PM_SLEEP */
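/*
 * Editor's illustration (not part of the original file): while the mask is
 * restricted, __GFP_IO and __GFP_FS are cleared from gfp_allowed_mask, so a
 * GFP_KERNEL allocation attempted while devices are suspended is effectively
 * degraded to GFP_NOIO and cannot recurse into block I/O or filesystems for
 * reclaim.
 */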
152
d9c23400
MG
153#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
154int pageblock_order __read_mostly;
155#endif
156
d98c7a09 157static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 158
/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
2f1b6248 170int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 171#ifdef CONFIG_ZONE_DMA
2f1b6248 172 256,
4b51d669 173#endif
fb0e7942 174#ifdef CONFIG_ZONE_DMA32
2f1b6248 175 256,
fb0e7942 176#endif
e53ef38d 177#ifdef CONFIG_HIGHMEM
2a1e274a 178 32,
e53ef38d 179#endif
2a1e274a 180 32,
2f1b6248 181};
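/*
 * Editor's illustration (not part of the original file): with the default
 * ratios above, a NORMAL allocation on the 1G example machine keeps
 * 784M/256 (about 3M) of ZONE_DMA out of reach, and a HIGHMEM allocation
 * keeps 224M/32 = 7M of ZONE_NORMAL in reserve; the actual per-zone
 * figures are computed in setup_per_zone_lowmem_reserve().
 */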
182
183EXPORT_SYMBOL(totalram_pages);
1da177e4 184
15ad7cdc 185static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 186#ifdef CONFIG_ZONE_DMA
2f1b6248 187 "DMA",
4b51d669 188#endif
fb0e7942 189#ifdef CONFIG_ZONE_DMA32
2f1b6248 190 "DMA32",
fb0e7942 191#endif
2f1b6248 192 "Normal",
e53ef38d 193#ifdef CONFIG_HIGHMEM
2a1e274a 194 "HighMem",
e53ef38d 195#endif
2a1e274a 196 "Movable",
2f1b6248
CL
197};
198
1da177e4
LT
199int min_free_kbytes = 1024;
200
2c85f51d
JB
201static unsigned long __meminitdata nr_kernel_pages;
202static unsigned long __meminitdata nr_all_pages;
a3142c8e 203static unsigned long __meminitdata dma_reserve;
1da177e4 204
0ee332c1
TH
205#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
206static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
207static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
208static unsigned long __initdata required_kernelcore;
209static unsigned long __initdata required_movablecore;
210static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
211
212/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
213int movable_zone;
214EXPORT_SYMBOL(movable_zone);
215#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 216
418508c1
MS
217#if MAX_NUMNODES > 1
218int nr_node_ids __read_mostly = MAX_NUMNODES;
62bc62a8 219int nr_online_nodes __read_mostly = 1;
418508c1 220EXPORT_SYMBOL(nr_node_ids);
62bc62a8 221EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
222#endif
223
9ef9acb0
MG
224int page_group_by_mobility_disabled __read_mostly;
225
ee6f509c 226void set_pageblock_migratetype(struct page *page, int migratetype)
b2a0ac88 227{
49255c61
MG
228
229 if (unlikely(page_group_by_mobility_disabled))
230 migratetype = MIGRATE_UNMOVABLE;
231
b2a0ac88
MG
232 set_pageblock_flags_group(page, (unsigned long)migratetype,
233 PB_migrate, PB_migrate_end);
234}
235
7f33d49a
RW
236bool oom_killer_disabled __read_mostly;
237
13e7444b 238#ifdef CONFIG_DEBUG_VM
c6a57e19 239static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 240{
bdc8cb98
DH
241 int ret = 0;
242 unsigned seq;
243 unsigned long pfn = page_to_pfn(page);
b5e6a5a2 244 unsigned long sp, start_pfn;
c6a57e19 245
bdc8cb98
DH
246 do {
247 seq = zone_span_seqbegin(zone);
b5e6a5a2
CS
248 start_pfn = zone->zone_start_pfn;
249 sp = zone->spanned_pages;
108bcc96 250 if (!zone_spans_pfn(zone, pfn))
bdc8cb98
DH
251 ret = 1;
252 } while (zone_span_seqretry(zone, seq));
253
b5e6a5a2
CS
254 if (ret)
255 pr_err("page %lu outside zone [ %lu - %lu ]\n",
256 pfn, start_pfn, start_pfn + sp);
257
bdc8cb98 258 return ret;
c6a57e19
DH
259}
260
261static int page_is_consistent(struct zone *zone, struct page *page)
262{
14e07298 263 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 264 return 0;
1da177e4 265 if (zone != page_zone(page))
c6a57e19
DH
266 return 0;
267
268 return 1;
269}
270/*
271 * Temporary debugging check for pages not lying within a given zone.
272 */
273static int bad_range(struct zone *zone, struct page *page)
274{
275 if (page_outside_zone_boundaries(zone, page))
1da177e4 276 return 1;
c6a57e19
DH
277 if (!page_is_consistent(zone, page))
278 return 1;
279
1da177e4
LT
280 return 0;
281}
13e7444b
NP
282#else
283static inline int bad_range(struct zone *zone, struct page *page)
284{
285 return 0;
286}
287#endif
288
224abf92 289static void bad_page(struct page *page)
1da177e4 290{
d936cf9b
HD
291 static unsigned long resume;
292 static unsigned long nr_shown;
293 static unsigned long nr_unshown;
294
2a7684a2
WF
295 /* Don't complain about poisoned pages */
296 if (PageHWPoison(page)) {
22b751c3 297 page_mapcount_reset(page); /* remove PageBuddy */
2a7684a2
WF
298 return;
299 }
300
d936cf9b
HD
301 /*
302 * Allow a burst of 60 reports, then keep quiet for that minute;
303 * or allow a steady drip of one report per second.
304 */
305 if (nr_shown == 60) {
306 if (time_before(jiffies, resume)) {
307 nr_unshown++;
308 goto out;
309 }
310 if (nr_unshown) {
1e9e6365
HD
311 printk(KERN_ALERT
312 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
313 nr_unshown);
314 nr_unshown = 0;
315 }
316 nr_shown = 0;
317 }
318 if (nr_shown++ == 0)
319 resume = jiffies + 60 * HZ;
320
1e9e6365 321 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 322 current->comm, page_to_pfn(page));
718a3821 323 dump_page(page);
3dc14741 324
4f31888c 325 print_modules();
1da177e4 326 dump_stack();
d936cf9b 327out:
8cc3b392 328 /* Leave bad fields for debug, except PageBuddy could make trouble */
22b751c3 329 page_mapcount_reset(page); /* remove PageBuddy */
373d4d09 330 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1da177e4
LT
331}
332
1da177e4
LT
333/*
334 * Higher-order pages are called "compound pages". They are structured thusly:
335 *
336 * The first PAGE_SIZE page is called the "head page".
337 *
338 * The remaining PAGE_SIZE pages are called "tail pages".
339 *
6416b9fa
WSH
340 * All pages have PG_compound set. All tail pages have their ->first_page
341 * pointing at the head page.
1da177e4 342 *
41d78ba5
HD
343 * The first tail page's ->lru.next holds the address of the compound page's
344 * put_page() function. Its ->lru.prev holds the order of allocation.
345 * This usage means that zero-order pages may not be compound.
1da177e4 346 */
d98c7a09
HD
347
348static void free_compound_page(struct page *page)
349{
d85f3385 350 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
351}
352
01ad1c08 353void prep_compound_page(struct page *page, unsigned long order)
18229df5
AW
354{
355 int i;
356 int nr_pages = 1 << order;
357
358 set_compound_page_dtor(page, free_compound_page);
359 set_compound_order(page, order);
360 __SetPageHead(page);
361 for (i = 1; i < nr_pages; i++) {
362 struct page *p = page + i;
18229df5 363 __SetPageTail(p);
58a84aa9 364 set_page_count(p, 0);
18229df5
AW
365 p->first_page = page;
366 }
367}
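/*
 * Editor's illustration (not part of the original file):
 * prep_compound_page(page, 2) stores the destructor and order 2 in page[0],
 * marks it as the head page, and turns page[1..3] into tail pages whose
 * ->first_page all point back at page[0].
 */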
368
59ff4216 369/* update __split_huge_page_refcount if you change this function */
8cc3b392 370static int destroy_compound_page(struct page *page, unsigned long order)
1da177e4
LT
371{
372 int i;
373 int nr_pages = 1 << order;
8cc3b392 374 int bad = 0;
1da177e4 375
0bb2c763 376 if (unlikely(compound_order(page) != order)) {
224abf92 377 bad_page(page);
8cc3b392
HD
378 bad++;
379 }
1da177e4 380
6d777953 381 __ClearPageHead(page);
8cc3b392 382
18229df5
AW
383 for (i = 1; i < nr_pages; i++) {
384 struct page *p = page + i;
1da177e4 385
e713a21d 386 if (unlikely(!PageTail(p) || (p->first_page != page))) {
224abf92 387 bad_page(page);
8cc3b392
HD
388 bad++;
389 }
d85f3385 390 __ClearPageTail(p);
1da177e4 391 }
8cc3b392
HD
392
393 return bad;
1da177e4 394}
1da177e4 395
17cf4406
NP
396static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
397{
398 int i;
399
6626c5d5
AM
400 /*
401 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
402 * and __GFP_HIGHMEM from hard or soft interrupt context.
403 */
725d704e 404 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
17cf4406
NP
405 for (i = 0; i < (1 << order); i++)
406 clear_highpage(page + i);
407}
408
c0a32fc5
SG
409#ifdef CONFIG_DEBUG_PAGEALLOC
410unsigned int _debug_guardpage_minorder;
411
412static int __init debug_guardpage_minorder_setup(char *buf)
413{
414 unsigned long res;
415
416 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
417 printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
418 return 0;
419 }
420 _debug_guardpage_minorder = res;
421 printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
422 return 0;
423}
424__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
425
426static inline void set_page_guard_flag(struct page *page)
427{
428 __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
429}
430
431static inline void clear_page_guard_flag(struct page *page)
432{
433 __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
434}
435#else
436static inline void set_page_guard_flag(struct page *page) { }
437static inline void clear_page_guard_flag(struct page *page) { }
438#endif
439
6aa3001b
AM
440static inline void set_page_order(struct page *page, int order)
441{
4c21e2f2 442 set_page_private(page, order);
676165a8 443 __SetPageBuddy(page);
1da177e4
LT
444}
445
446static inline void rmv_page_order(struct page *page)
447{
676165a8 448 __ClearPageBuddy(page);
4c21e2f2 449 set_page_private(page, 0);
1da177e4
LT
450}
451
452/*
453 * Locate the struct page for both the matching buddy in our
454 * pair (buddy1) and the combined O(n+1) page they form (page).
455 *
456 * 1) Any buddy B1 will have an order O twin B2 which satisfies
457 * the following equation:
458 * B2 = B1 ^ (1 << O)
459 * For example, if the starting buddy (buddy2) is #8 its order
460 * 1 buddy is #10:
461 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
462 *
463 * 2) Any buddy B will have an order O+1 parent P which
464 * satisfies the following equation:
465 * P = B & ~(1 << O)
466 *
d6e05edc 467 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
1da177e4 468 */
1da177e4 469static inline unsigned long
43506fad 470__find_buddy_index(unsigned long page_idx, unsigned int order)
1da177e4 471{
43506fad 472 return page_idx ^ (1 << order);
473}
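/*
 * Editor's worked example (not part of the original source): carrying the
 * comment above one step further, the order-1 parent of buddy #10 is
 *
 *	P = 10 & ~(1 << 1) = 8
 *
 * i.e. merging buddies #8 and #10 yields the order-2 block starting at page
 * index 8, which is what combined_idx = buddy_idx & page_idx computes in
 * __free_one_page() below.
 */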
474
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount -2.
 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
cb2b95e1
AW
488static inline int page_is_buddy(struct page *page, struct page *buddy,
489 int order)
1da177e4 490{
14e07298 491 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 492 return 0;
13e7444b 493
cb2b95e1
AW
494 if (page_zone_id(page) != page_zone_id(buddy))
495 return 0;
496
c0a32fc5
SG
497 if (page_is_guard(buddy) && page_order(buddy) == order) {
498 VM_BUG_ON(page_count(buddy) != 0);
499 return 1;
500 }
501
cb2b95e1 502 if (PageBuddy(buddy) && page_order(buddy) == order) {
a3af9c38 503 VM_BUG_ON(page_count(buddy) != 0);
6aa3001b 504 return 1;
676165a8 505 }
6aa3001b 506 return 0;
1da177e4
LT
507}
508
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with _mapcount -2. Page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
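/*
 * Editor's worked example (not part of the original source): freeing the
 * order-0 page at index 13, with index 12 free as an order-0 block and
 * indices 14-15 free as an order-1 block, walks the merge loop below twice:
 *
 *	order 0: buddy_idx = 13 ^ 1 = 12  (free, order 0) -> combined_idx = 12
 *	order 1: buddy_idx = 12 ^ 2 = 14  (free, order 1) -> combined_idx = 12
 *	order 2: buddy_idx = 12 ^ 4 = 8   (not a free buddy) -> stop
 *
 * and the result is a single order-2 block at index 12 on the free list.
 */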
532
48db57f8 533static inline void __free_one_page(struct page *page,
ed0ae21d
MG
534 struct zone *zone, unsigned int order,
535 int migratetype)
1da177e4
LT
536{
537 unsigned long page_idx;
6dda9d55 538 unsigned long combined_idx;
43506fad 539 unsigned long uninitialized_var(buddy_idx);
6dda9d55 540 struct page *buddy;
1da177e4 541
d29bb978
CS
542 VM_BUG_ON(!zone_is_initialized(zone));
543
224abf92 544 if (unlikely(PageCompound(page)))
8cc3b392
HD
545 if (unlikely(destroy_compound_page(page, order)))
546 return;
1da177e4 547
ed0ae21d
MG
548 VM_BUG_ON(migratetype == -1);
549
1da177e4
LT
550 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
551
f2260e6b 552 VM_BUG_ON(page_idx & ((1 << order) - 1));
725d704e 553 VM_BUG_ON(bad_range(zone, page));
1da177e4 554
1da177e4 555 while (order < MAX_ORDER-1) {
43506fad
KC
556 buddy_idx = __find_buddy_index(page_idx, order);
557 buddy = page + (buddy_idx - page_idx);
cb2b95e1 558 if (!page_is_buddy(page, buddy, order))
3c82d0ce 559 break;
c0a32fc5
SG
560 /*
561 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
562 * merge with it and move up one order.
563 */
564 if (page_is_guard(buddy)) {
565 clear_page_guard_flag(buddy);
566 set_page_private(page, 0);
d1ce749a
BZ
567 __mod_zone_freepage_state(zone, 1 << order,
568 migratetype);
c0a32fc5
SG
569 } else {
570 list_del(&buddy->lru);
571 zone->free_area[order].nr_free--;
572 rmv_page_order(buddy);
573 }
43506fad 574 combined_idx = buddy_idx & page_idx;
1da177e4
LT
575 page = page + (combined_idx - page_idx);
576 page_idx = combined_idx;
577 order++;
578 }
579 set_page_order(page, order);
6dda9d55
CZ
580
581 /*
582 * If this is not the largest possible page, check if the buddy
583 * of the next-highest order is free. If it is, it's possible
584 * that pages are being freed that will coalesce soon. In case,
585 * that is happening, add the free page to the tail of the list
586 * so it's less likely to be used soon and more likely to be merged
587 * as a higher order page
588 */
b7f50cfa 589 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
6dda9d55 590 struct page *higher_page, *higher_buddy;
43506fad
KC
591 combined_idx = buddy_idx & page_idx;
592 higher_page = page + (combined_idx - page_idx);
593 buddy_idx = __find_buddy_index(combined_idx, order + 1);
0ba8f2d5 594 higher_buddy = higher_page + (buddy_idx - combined_idx);
6dda9d55
CZ
595 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
596 list_add_tail(&page->lru,
597 &zone->free_area[order].free_list[migratetype]);
598 goto out;
599 }
600 }
601
602 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
603out:
1da177e4
LT
604 zone->free_area[order].nr_free++;
605}
606
224abf92 607static inline int free_pages_check(struct page *page)
1da177e4 608{
92be2e33
NP
609 if (unlikely(page_mapcount(page) |
610 (page->mapping != NULL) |
a3af9c38 611 (atomic_read(&page->_count) != 0) |
f212ad7c
DN
612 (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
613 (mem_cgroup_bad_page_check(page)))) {
224abf92 614 bad_page(page);
79f4b7bf 615 return 1;
8cc3b392 616 }
22b751c3 617 page_nid_reset_last(page);
79f4b7bf
HD
618 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
619 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
620 return 0;
1da177e4
LT
621}
622
623/*
5f8dcc21 624 * Frees a number of pages from the PCP lists
1da177e4 625 * Assumes all pages on list are in same zone, and of same order.
207f36ee 626 * count is the number of pages to free.
1da177e4
LT
627 *
628 * If the zone was previously in an "all pages pinned" state then look to
629 * see if this freeing clears that state.
630 *
631 * And clear the zone's pages_scanned counter, to hold off the "all pages are
632 * pinned" detection logic.
633 */
5f8dcc21
MG
634static void free_pcppages_bulk(struct zone *zone, int count,
635 struct per_cpu_pages *pcp)
1da177e4 636{
5f8dcc21 637 int migratetype = 0;
a6f9edd6 638 int batch_free = 0;
72853e29 639 int to_free = count;
5f8dcc21 640
c54ad30c 641 spin_lock(&zone->lock);
93e4a89a 642 zone->all_unreclaimable = 0;
1da177e4 643 zone->pages_scanned = 0;
f2260e6b 644
72853e29 645 while (to_free) {
48db57f8 646 struct page *page;
5f8dcc21
MG
647 struct list_head *list;
648
649 /*
a6f9edd6
MG
650 * Remove pages from lists in a round-robin fashion. A
651 * batch_free count is maintained that is incremented when an
652 * empty list is encountered. This is so more pages are freed
653 * off fuller lists instead of spinning excessively around empty
654 * lists
5f8dcc21
MG
655 */
656 do {
a6f9edd6 657 batch_free++;
5f8dcc21
MG
658 if (++migratetype == MIGRATE_PCPTYPES)
659 migratetype = 0;
660 list = &pcp->lists[migratetype];
661 } while (list_empty(list));
48db57f8 662
1d16871d
NK
663 /* This is the only non-empty list. Free them all. */
664 if (batch_free == MIGRATE_PCPTYPES)
665 batch_free = to_free;
666
a6f9edd6 667 do {
770c8aaa
BZ
668 int mt; /* migratetype of the to-be-freed page */
669
a6f9edd6
MG
670 page = list_entry(list->prev, struct page, lru);
671 /* must delete as __free_one_page list manipulates */
672 list_del(&page->lru);
b12c4ad1 673 mt = get_freepage_migratetype(page);
a7016235 674 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
770c8aaa
BZ
675 __free_one_page(page, zone, 0, mt);
676 trace_mm_page_pcpu_drain(page, 0, mt);
194159fb 677 if (likely(!is_migrate_isolate_page(page))) {
97d0da22
WC
678 __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
679 if (is_migrate_cma(mt))
680 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
681 }
72853e29 682 } while (--to_free && --batch_free && !list_empty(list));
1da177e4 683 }
c54ad30c 684 spin_unlock(&zone->lock);
1da177e4
LT
685}
686
ed0ae21d
MG
687static void free_one_page(struct zone *zone, struct page *page, int order,
688 int migratetype)
1da177e4 689{
006d22d9 690 spin_lock(&zone->lock);
93e4a89a 691 zone->all_unreclaimable = 0;
006d22d9 692 zone->pages_scanned = 0;
f2260e6b 693
ed0ae21d 694 __free_one_page(page, zone, order, migratetype);
194159fb 695 if (unlikely(!is_migrate_isolate(migratetype)))
d1ce749a 696 __mod_zone_freepage_state(zone, 1 << order, migratetype);
006d22d9 697 spin_unlock(&zone->lock);
48db57f8
NP
698}
699
ec95f53a 700static bool free_pages_prepare(struct page *page, unsigned int order)
48db57f8 701{
1da177e4 702 int i;
8cc3b392 703 int bad = 0;
1da177e4 704
b413d48a 705 trace_mm_page_free(page, order);
b1eeab67
VN
706 kmemcheck_free_shadow(page, order);
707
8dd60a3a
AA
708 if (PageAnon(page))
709 page->mapping = NULL;
710 for (i = 0; i < (1 << order); i++)
711 bad += free_pages_check(page + i);
8cc3b392 712 if (bad)
ec95f53a 713 return false;
689bcebf 714
3ac7fe5a 715 if (!PageHighMem(page)) {
9858db50 716 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
3ac7fe5a
TG
717 debug_check_no_obj_freed(page_address(page),
718 PAGE_SIZE << order);
719 }
dafb1367 720 arch_free_page(page, order);
48db57f8 721 kernel_map_pages(page, 1 << order, 0);
dafb1367 722
ec95f53a
KM
723 return true;
724}
725
726static void __free_pages_ok(struct page *page, unsigned int order)
727{
728 unsigned long flags;
95e34412 729 int migratetype;
ec95f53a
KM
730
731 if (!free_pages_prepare(page, order))
732 return;
733
c54ad30c 734 local_irq_save(flags);
f8891e5e 735 __count_vm_events(PGFREE, 1 << order);
95e34412
MK
736 migratetype = get_pageblock_migratetype(page);
737 set_freepage_migratetype(page, migratetype);
738 free_one_page(page_zone(page), page, order, migratetype);
c54ad30c 739 local_irq_restore(flags);
1da177e4
LT
740}
741
9feedc9d
JL
742/*
743 * Read access to zone->managed_pages is safe because it's unsigned long,
744 * but we still need to serialize writers. Currently all callers of
745 * __free_pages_bootmem() except put_page_bootmem() should only be used
746 * at boot time. So for shorter boot time, we shift the burden to
747 * put_page_bootmem() to serialize writers.
748 */
af370fb8 749void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
a226f6c8 750{
c3993076
JW
751 unsigned int nr_pages = 1 << order;
752 unsigned int loop;
a226f6c8 753
c3993076
JW
754 prefetchw(page);
755 for (loop = 0; loop < nr_pages; loop++) {
756 struct page *p = &page[loop];
757
758 if (loop + 1 < nr_pages)
759 prefetchw(p + 1);
760 __ClearPageReserved(p);
761 set_page_count(p, 0);
a226f6c8 762 }
c3993076 763
9feedc9d 764 page_zone(page)->managed_pages += 1 << order;
c3993076
JW
765 set_page_refcounted(page);
766 __free_pages(page, order);
a226f6c8
DH
767}
768
47118af0
MN
769#ifdef CONFIG_CMA
770/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
771void __init init_cma_reserved_pageblock(struct page *page)
772{
773 unsigned i = pageblock_nr_pages;
774 struct page *p = page;
775
776 do {
777 __ClearPageReserved(p);
778 set_page_count(p, 0);
779 } while (++p, --i);
780
781 set_page_refcounted(page);
782 set_pageblock_migratetype(page, MIGRATE_CMA);
783 __free_pages(page, pageblock_order);
784 totalram_pages += pageblock_nr_pages;
41a79734
MS
785#ifdef CONFIG_HIGHMEM
786 if (PageHighMem(page))
787 totalhigh_pages += pageblock_nr_pages;
788#endif
47118af0
MN
789}
790#endif
1da177e4
LT
791
792/*
793 * The order of subdivision here is critical for the IO subsystem.
794 * Please do not alter this order without good reasons and regression
795 * testing. Specifically, as large blocks of memory are subdivided,
796 * the order in which smaller blocks are delivered depends on the order
797 * they're subdivided in this function. This is the primary factor
798 * influencing the order in which pages are delivered to the IO
799 * subsystem according to empirical testing, and this is also justified
800 * by considering the behavior of a buddy system containing a single
801 * large block of memory acted on by a series of small allocations.
802 * This behavior is a critical factor in sglist merging's success.
803 *
6d49e352 804 * -- nyc
1da177e4 805 */
085cc7d5 806static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
807 int low, int high, struct free_area *area,
808 int migratetype)
1da177e4
LT
809{
810 unsigned long size = 1 << high;
811
812 while (high > low) {
813 area--;
814 high--;
815 size >>= 1;
725d704e 816 VM_BUG_ON(bad_range(zone, &page[size]));
c0a32fc5
SG
817
818#ifdef CONFIG_DEBUG_PAGEALLOC
819 if (high < debug_guardpage_minorder()) {
820 /*
821 * Mark as guard pages (or page), that will allow to
822 * merge back to allocator when buddy will be freed.
823 * Corresponding page table entries will not be touched,
824 * pages will stay not present in virtual address space
825 */
826 INIT_LIST_HEAD(&page[size].lru);
827 set_page_guard_flag(&page[size]);
828 set_page_private(&page[size], high);
829 /* Guard pages are not available for any usage */
d1ce749a
BZ
830 __mod_zone_freepage_state(zone, -(1 << high),
831 migratetype);
c0a32fc5
SG
832 continue;
833 }
834#endif
b2a0ac88 835 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
836 area->nr_free++;
837 set_page_order(&page[size], high);
838 }
839}
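/*
 * Editor's worked example (not part of the original source): expand()
 * called with low = 0 and high = 3 on an order-3 (8-page) block peels off
 * the upper halves one order at a time:
 *
 *	high = 2: pages [4..7] -> free_area[2]
 *	high = 1: pages [2..3] -> free_area[1]
 *	high = 0: page  [1]    -> free_area[0]
 *
 * leaving page[0] as the order-0 page returned to the caller (guard-page
 * debugging aside).
 */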
840
1da177e4
LT
841/*
842 * This page is about to be returned from the page allocator
843 */
2a7684a2 844static inline int check_new_page(struct page *page)
1da177e4 845{
92be2e33
NP
846 if (unlikely(page_mapcount(page) |
847 (page->mapping != NULL) |
a3af9c38 848 (atomic_read(&page->_count) != 0) |
f212ad7c
DN
849 (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
850 (mem_cgroup_bad_page_check(page)))) {
224abf92 851 bad_page(page);
689bcebf 852 return 1;
8cc3b392 853 }
2a7684a2
WF
854 return 0;
855}
856
857static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
858{
859 int i;
860
861 for (i = 0; i < (1 << order); i++) {
862 struct page *p = page + i;
863 if (unlikely(check_new_page(p)))
864 return 1;
865 }
689bcebf 866
4c21e2f2 867 set_page_private(page, 0);
7835e98b 868 set_page_refcounted(page);
cc102509
NP
869
870 arch_alloc_page(page, order);
1da177e4 871 kernel_map_pages(page, 1 << order, 1);
17cf4406
NP
872
873 if (gfp_flags & __GFP_ZERO)
874 prep_zero_page(page, order, gfp_flags);
875
876 if (order && (gfp_flags & __GFP_COMP))
877 prep_compound_page(page, order);
878
689bcebf 879 return 0;
1da177e4
LT
880}
881
56fd56b8
MG
882/*
883 * Go through the free lists for the given migratetype and remove
884 * the smallest available page from the freelists
885 */
728ec980
MG
886static inline
887struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
888 int migratetype)
889{
890 unsigned int current_order;
891 struct free_area * area;
892 struct page *page;
893
894 /* Find a page of the appropriate size in the preferred list */
895 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
896 area = &(zone->free_area[current_order]);
897 if (list_empty(&area->free_list[migratetype]))
898 continue;
899
900 page = list_entry(area->free_list[migratetype].next,
901 struct page, lru);
902 list_del(&page->lru);
903 rmv_page_order(page);
904 area->nr_free--;
56fd56b8
MG
905 expand(zone, page, order, current_order, area, migratetype);
906 return page;
907 }
908
909 return NULL;
910}
911
912
b2a0ac88
MG
913/*
914 * This array describes the order lists are fallen back to when
915 * the free lists for the desirable migrate type are depleted
916 */
47118af0
MN
917static int fallbacks[MIGRATE_TYPES][4] = {
918 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
919 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
920#ifdef CONFIG_CMA
921 [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
922 [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
923#else
924 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
925#endif
6d4a4916 926 [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
194159fb 927#ifdef CONFIG_MEMORY_ISOLATION
6d4a4916 928 [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
194159fb 929#endif
930};
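/*
 * Editor's illustration (assuming CONFIG_CMA is not set): an order-0
 * MIGRATE_UNMOVABLE request that finds its own free lists empty is retried
 * by __rmqueue_fallback() against
 *
 *	MIGRATE_RECLAIMABLE -> MIGRATE_MOVABLE -> MIGRATE_RESERVE
 *
 * in that order; MIGRATE_RESERVE itself is not taken here but via the
 * retry_reserve path in __rmqueue().
 */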
931
c361be55
MG
932/*
933 * Move the free pages in a range to the free lists of the requested type.
d9c23400 934 * Note that start_page and end_pages are not aligned on a pageblock
c361be55
MG
935 * boundary. If alignment is required, use move_freepages_block()
936 */
435b405c 937int move_freepages(struct zone *zone,
b69a7288
AB
938 struct page *start_page, struct page *end_page,
939 int migratetype)
c361be55
MG
940{
941 struct page *page;
942 unsigned long order;
d100313f 943 int pages_moved = 0;
c361be55
MG
944
945#ifndef CONFIG_HOLES_IN_ZONE
946 /*
947 * page_zone is not safe to call in this context when
948 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
949 * anyway as we check zone boundaries in move_freepages_block().
950 * Remove at a later date when no bug reports exist related to
ac0e5b7a 951 * grouping pages by mobility
c361be55
MG
952 */
953 BUG_ON(page_zone(start_page) != page_zone(end_page));
954#endif
955
956 for (page = start_page; page <= end_page;) {
344c790e
AL
957 /* Make sure we are not inadvertently changing nodes */
958 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
959
c361be55
MG
960 if (!pfn_valid_within(page_to_pfn(page))) {
961 page++;
962 continue;
963 }
964
965 if (!PageBuddy(page)) {
966 page++;
967 continue;
968 }
969
970 order = page_order(page);
84be48d8
KS
971 list_move(&page->lru,
972 &zone->free_area[order].free_list[migratetype]);
95e34412 973 set_freepage_migratetype(page, migratetype);
c361be55 974 page += 1 << order;
d100313f 975 pages_moved += 1 << order;
c361be55
MG
976 }
977
d100313f 978 return pages_moved;
c361be55
MG
979}
980
ee6f509c 981int move_freepages_block(struct zone *zone, struct page *page,
68e3e926 982 int migratetype)
c361be55
MG
983{
984 unsigned long start_pfn, end_pfn;
985 struct page *start_page, *end_page;
986
987 start_pfn = page_to_pfn(page);
d9c23400 988 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 989 start_page = pfn_to_page(start_pfn);
d9c23400
MG
990 end_page = start_page + pageblock_nr_pages - 1;
991 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
992
993 /* Do not cross zone boundaries */
108bcc96 994 if (!zone_spans_pfn(zone, start_pfn))
c361be55 995 start_page = page;
108bcc96 996 if (!zone_spans_pfn(zone, end_pfn))
c361be55
MG
997 return 0;
998
999 return move_freepages(zone, start_page, end_page, migratetype);
1000}
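/*
 * Editor's illustration (assuming pageblock_order = 9, i.e.
 * pageblock_nr_pages = 512, as with 2MB pageblocks and 4K pages): a page at
 * pfn 0x12345 is rounded down to start_pfn = 0x12345 & ~511 = 0x12200, and
 * the block moved above spans pfns 0x12200..0x123ff.
 */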
1001
2f66a68f
MG
1002static void change_pageblock_range(struct page *pageblock_page,
1003 int start_order, int migratetype)
1004{
1005 int nr_pageblocks = 1 << (start_order - pageblock_order);
1006
1007 while (nr_pageblocks--) {
1008 set_pageblock_migratetype(pageblock_page, migratetype);
1009 pageblock_page += pageblock_nr_pages;
1010 }
1011}
1012
/* Remove an element from the buddy allocator from one of the fallback lists */
0ac3a409
MG
1014static inline struct page *
1015__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
b2a0ac88
MG
1016{
1017 struct free_area * area;
1018 int current_order;
1019 struct page *page;
1020 int migratetype, i;
1021
1022 /* Find the largest possible block of pages in the other list */
1023 for (current_order = MAX_ORDER-1; current_order >= order;
1024 --current_order) {
6d4a4916 1025 for (i = 0;; i++) {
b2a0ac88
MG
1026 migratetype = fallbacks[start_migratetype][i];
1027
56fd56b8
MG
1028 /* MIGRATE_RESERVE handled later if necessary */
1029 if (migratetype == MIGRATE_RESERVE)
6d4a4916 1030 break;
e010487d 1031
b2a0ac88
MG
1032 area = &(zone->free_area[current_order]);
1033 if (list_empty(&area->free_list[migratetype]))
1034 continue;
1035
1036 page = list_entry(area->free_list[migratetype].next,
1037 struct page, lru);
1038 area->nr_free--;
1039
1040 /*
c361be55 1041 * If breaking a large block of pages, move all free
46dafbca
MG
1042 * pages to the preferred allocation list. If falling
1043 * back for a reclaimable kernel allocation, be more
25985edc 1044 * aggressive about taking ownership of free pages
47118af0
MN
1045 *
1046 * On the other hand, never change migration
1047 * type of MIGRATE_CMA pageblocks nor move CMA
1048 * pages on different free lists. We don't
1049 * want unmovable pages to be allocated from
1050 * MIGRATE_CMA areas.
b2a0ac88 1051 */
47118af0
MN
1052 if (!is_migrate_cma(migratetype) &&
1053 (unlikely(current_order >= pageblock_order / 2) ||
1054 start_migratetype == MIGRATE_RECLAIMABLE ||
1055 page_group_by_mobility_disabled)) {
1056 int pages;
46dafbca
MG
1057 pages = move_freepages_block(zone, page,
1058 start_migratetype);
1059
1060 /* Claim the whole block if over half of it is free */
dd5d241e
MG
1061 if (pages >= (1 << (pageblock_order-1)) ||
1062 page_group_by_mobility_disabled)
46dafbca
MG
1063 set_pageblock_migratetype(page,
1064 start_migratetype);
1065
b2a0ac88 1066 migratetype = start_migratetype;
c361be55 1067 }
b2a0ac88
MG
1068
1069 /* Remove the page from the freelists */
1070 list_del(&page->lru);
1071 rmv_page_order(page);
b2a0ac88 1072
2f66a68f 1073 /* Take ownership for orders >= pageblock_order */
47118af0
MN
1074 if (current_order >= pageblock_order &&
1075 !is_migrate_cma(migratetype))
2f66a68f 1076 change_pageblock_range(page, current_order,
b2a0ac88
MG
1077 start_migratetype);
1078
47118af0
MN
1079 expand(zone, page, order, current_order, area,
1080 is_migrate_cma(migratetype)
1081 ? migratetype : start_migratetype);
e0fff1bd
MG
1082
1083 trace_mm_page_alloc_extfrag(page, order, current_order,
1084 start_migratetype, migratetype);
1085
b2a0ac88
MG
1086 return page;
1087 }
1088 }
1089
728ec980 1090 return NULL;
b2a0ac88
MG
1091}
1092
56fd56b8 1093/*
1da177e4
LT
1094 * Do the hard work of removing an element from the buddy allocator.
1095 * Call me with the zone->lock already held.
1096 */
b2a0ac88
MG
1097static struct page *__rmqueue(struct zone *zone, unsigned int order,
1098 int migratetype)
1da177e4 1099{
1da177e4
LT
1100 struct page *page;
1101
728ec980 1102retry_reserve:
56fd56b8 1103 page = __rmqueue_smallest(zone, order, migratetype);
b2a0ac88 1104
728ec980 1105 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
56fd56b8 1106 page = __rmqueue_fallback(zone, order, migratetype);
b2a0ac88 1107
728ec980
MG
1108 /*
1109 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1110 * is used because __rmqueue_smallest is an inline function
1111 * and we want just one call site
1112 */
1113 if (!page) {
1114 migratetype = MIGRATE_RESERVE;
1115 goto retry_reserve;
1116 }
1117 }
1118
0d3d062a 1119 trace_mm_page_alloc_zone_locked(page, order, migratetype);
b2a0ac88 1120 return page;
1da177e4
LT
1121}
1122
5f63b720 1123/*
1da177e4
LT
1124 * Obtain a specified number of elements from the buddy allocator, all under
1125 * a single hold of the lock, for efficiency. Add them to the supplied list.
1126 * Returns the number of new pages which were placed at *list.
1127 */
5f63b720 1128static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 1129 unsigned long count, struct list_head *list,
e084b2d9 1130 int migratetype, int cold)
1da177e4 1131{
47118af0 1132 int mt = migratetype, i;
5f63b720 1133
c54ad30c 1134 spin_lock(&zone->lock);
1da177e4 1135 for (i = 0; i < count; ++i) {
b2a0ac88 1136 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 1137 if (unlikely(page == NULL))
1da177e4 1138 break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number under some conditions. This is useful for IO devices
		 * that can merge IO requests if the physical pages are
		 * ordered properly.
		 */
e084b2d9
MG
1149 if (likely(cold == 0))
1150 list_add(&page->lru, list);
1151 else
1152 list_add_tail(&page->lru, list);
47118af0
MN
1153 if (IS_ENABLED(CONFIG_CMA)) {
1154 mt = get_pageblock_migratetype(page);
194159fb 1155 if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
47118af0
MN
1156 mt = migratetype;
1157 }
b12c4ad1 1158 set_freepage_migratetype(page, mt);
81eabcbe 1159 list = &page->lru;
d1ce749a
BZ
1160 if (is_migrate_cma(mt))
1161 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1162 -(1 << order));
1da177e4 1163 }
f2260e6b 1164 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
c54ad30c 1165 spin_unlock(&zone->lock);
085cc7d5 1166 return i;
1da177e4
LT
1167}
1168
4ae7c039 1169#ifdef CONFIG_NUMA
8fce4d8e 1170/*
4037d452
CL
1171 * Called from the vmstat counter updater to drain pagesets of this
1172 * currently executing processor on remote nodes after they have
1173 * expired.
1174 *
879336c3
CL
1175 * Note that this function must be called with the thread pinned to
1176 * a single processor.
8fce4d8e 1177 */
4037d452 1178void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 1179{
4ae7c039 1180 unsigned long flags;
4037d452 1181 int to_drain;
4ae7c039 1182
4037d452
CL
1183 local_irq_save(flags);
1184 if (pcp->count >= pcp->batch)
1185 to_drain = pcp->batch;
1186 else
1187 to_drain = pcp->count;
2a13515c
KM
1188 if (to_drain > 0) {
1189 free_pcppages_bulk(zone, to_drain, pcp);
1190 pcp->count -= to_drain;
1191 }
4037d452 1192 local_irq_restore(flags);
4ae7c039
CL
1193}
1194#endif
1195
9f8f2172
CL
1196/*
1197 * Drain pages of the indicated processor.
1198 *
1199 * The processor must either be the current processor and the
1200 * thread pinned to the current processor or a processor that
1201 * is not online.
1202 */
1203static void drain_pages(unsigned int cpu)
1da177e4 1204{
c54ad30c 1205 unsigned long flags;
1da177e4 1206 struct zone *zone;
1da177e4 1207
ee99c71c 1208 for_each_populated_zone(zone) {
1da177e4 1209 struct per_cpu_pageset *pset;
3dfa5721 1210 struct per_cpu_pages *pcp;
1da177e4 1211
99dcc3e5
CL
1212 local_irq_save(flags);
1213 pset = per_cpu_ptr(zone->pageset, cpu);
3dfa5721
CL
1214
1215 pcp = &pset->pcp;
2ff754fa
DR
1216 if (pcp->count) {
1217 free_pcppages_bulk(zone, pcp->count, pcp);
1218 pcp->count = 0;
1219 }
3dfa5721 1220 local_irq_restore(flags);
1da177e4
LT
1221 }
1222}
1da177e4 1223
9f8f2172
CL
1224/*
1225 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1226 */
1227void drain_local_pages(void *arg)
1228{
1229 drain_pages(smp_processor_id());
1230}
1231
1232/*
74046494
GBY
1233 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1234 *
1235 * Note that this code is protected against sending an IPI to an offline
1236 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1237 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1238 * nothing keeps CPUs from showing up after we populated the cpumask and
1239 * before the call to on_each_cpu_mask().
9f8f2172
CL
1240 */
1241void drain_all_pages(void)
1242{
74046494
GBY
1243 int cpu;
1244 struct per_cpu_pageset *pcp;
1245 struct zone *zone;
1246
1247 /*
1248 * Allocate in the BSS so we wont require allocation in
1249 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1250 */
1251 static cpumask_t cpus_with_pcps;
1252
1253 /*
1254 * We don't care about racing with CPU hotplug event
1255 * as offline notification will cause the notified
1256 * cpu to drain that CPU pcps and on_each_cpu_mask
1257 * disables preemption as part of its processing
1258 */
1259 for_each_online_cpu(cpu) {
1260 bool has_pcps = false;
1261 for_each_populated_zone(zone) {
1262 pcp = per_cpu_ptr(zone->pageset, cpu);
1263 if (pcp->pcp.count) {
1264 has_pcps = true;
1265 break;
1266 }
1267 }
1268 if (has_pcps)
1269 cpumask_set_cpu(cpu, &cpus_with_pcps);
1270 else
1271 cpumask_clear_cpu(cpu, &cpus_with_pcps);
1272 }
1273 on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
9f8f2172
CL
1274}
1275
296699de 1276#ifdef CONFIG_HIBERNATION
1da177e4
LT
1277
1278void mark_free_pages(struct zone *zone)
1279{
f623f0db
RW
1280 unsigned long pfn, max_zone_pfn;
1281 unsigned long flags;
b2a0ac88 1282 int order, t;
1da177e4
LT
1283 struct list_head *curr;
1284
1285 if (!zone->spanned_pages)
1286 return;
1287
1288 spin_lock_irqsave(&zone->lock, flags);
f623f0db 1289
108bcc96 1290 max_zone_pfn = zone_end_pfn(zone);
f623f0db
RW
1291 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1292 if (pfn_valid(pfn)) {
1293 struct page *page = pfn_to_page(pfn);
1294
7be98234
RW
1295 if (!swsusp_page_is_forbidden(page))
1296 swsusp_unset_page_free(page);
f623f0db 1297 }
1da177e4 1298
b2a0ac88
MG
1299 for_each_migratetype_order(order, t) {
1300 list_for_each(curr, &zone->free_area[order].free_list[t]) {
f623f0db 1301 unsigned long i;
1da177e4 1302
f623f0db
RW
1303 pfn = page_to_pfn(list_entry(curr, struct page, lru));
1304 for (i = 0; i < (1UL << order); i++)
7be98234 1305 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 1306 }
b2a0ac88 1307 }
1da177e4
LT
1308 spin_unlock_irqrestore(&zone->lock, flags);
1309}
#endif /* CONFIG_HIBERNATION */
1da177e4 1311
1da177e4
LT
1312/*
1313 * Free a 0-order page
fc91668e 1314 * cold == 1 ? free a cold page : free a hot page
1da177e4 1315 */
fc91668e 1316void free_hot_cold_page(struct page *page, int cold)
1da177e4
LT
1317{
1318 struct zone *zone = page_zone(page);
1319 struct per_cpu_pages *pcp;
1320 unsigned long flags;
5f8dcc21 1321 int migratetype;
1da177e4 1322
ec95f53a 1323 if (!free_pages_prepare(page, 0))
689bcebf
HD
1324 return;
1325
5f8dcc21 1326 migratetype = get_pageblock_migratetype(page);
b12c4ad1 1327 set_freepage_migratetype(page, migratetype);
1da177e4 1328 local_irq_save(flags);
f8891e5e 1329 __count_vm_event(PGFREE);
da456f14 1330
5f8dcc21
MG
1331 /*
1332 * We only track unmovable, reclaimable and movable on pcp lists.
1333 * Free ISOLATE pages back to the allocator because they are being
1334 * offlined but treat RESERVE as movable pages so we can get those
1335 * areas back if necessary. Otherwise, we may have to free
1336 * excessively into the page allocator
1337 */
1338 if (migratetype >= MIGRATE_PCPTYPES) {
194159fb 1339 if (unlikely(is_migrate_isolate(migratetype))) {
5f8dcc21
MG
1340 free_one_page(zone, page, 0, migratetype);
1341 goto out;
1342 }
1343 migratetype = MIGRATE_MOVABLE;
1344 }
1345
99dcc3e5 1346 pcp = &this_cpu_ptr(zone->pageset)->pcp;
3dfa5721 1347 if (cold)
5f8dcc21 1348 list_add_tail(&page->lru, &pcp->lists[migratetype]);
3dfa5721 1349 else
5f8dcc21 1350 list_add(&page->lru, &pcp->lists[migratetype]);
1da177e4 1351 pcp->count++;
48db57f8 1352 if (pcp->count >= pcp->high) {
5f8dcc21 1353 free_pcppages_bulk(zone, pcp->batch, pcp);
48db57f8
NP
1354 pcp->count -= pcp->batch;
1355 }
5f8dcc21
MG
1356
1357out:
1da177e4 1358 local_irq_restore(flags);
1da177e4
LT
1359}
1360
cc59850e
KK
1361/*
1362 * Free a list of 0-order pages
1363 */
1364void free_hot_cold_page_list(struct list_head *list, int cold)
1365{
1366 struct page *page, *next;
1367
1368 list_for_each_entry_safe(page, next, list, lru) {
b413d48a 1369 trace_mm_page_free_batched(page, cold);
cc59850e
KK
1370 free_hot_cold_page(page, cold);
1371 }
1372}
1373
8dfcc9ba
NP
1374/*
1375 * split_page takes a non-compound higher-order page, and splits it into
1376 * n (1<<order) sub-pages: page[0..n]
1377 * Each sub-page must be freed individually.
1378 *
1379 * Note: this is probably too low level an operation for use in drivers.
1380 * Please consult with lkml before using this in your driver.
1381 */
1382void split_page(struct page *page, unsigned int order)
1383{
1384 int i;
1385
725d704e
NP
1386 VM_BUG_ON(PageCompound(page));
1387 VM_BUG_ON(!page_count(page));
b1eeab67
VN
1388
1389#ifdef CONFIG_KMEMCHECK
1390 /*
1391 * Split shadow pages too, because free(page[0]) would
1392 * otherwise free the whole shadow.
1393 */
1394 if (kmemcheck_page_is_tracked(page))
1395 split_page(virt_to_page(page[0].shadow), order);
1396#endif
1397
7835e98b
NP
1398 for (i = 1; i < (1 << order); i++)
1399 set_page_refcounted(page + i);
8dfcc9ba 1400}
5853ff23 1401EXPORT_SYMBOL_GPL(split_page);
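/*
 * Editor's usage sketch (hypothetical caller, subject to the "consult lkml"
 * caveat above): a driver that allocated an order-2 block but wants to
 * release the sub-pages independently could do
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	split_page(page, 2);
 *	__free_page(page + 3);		(page[0..2] remain in use)
 *
 * since each sub-page carries its own reference count after the split.
 */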
8dfcc9ba 1402
8fb74b9f 1403static int __isolate_free_page(struct page *page, unsigned int order)
748446bb 1404{
748446bb
MG
1405 unsigned long watermark;
1406 struct zone *zone;
2139cbe6 1407 int mt;
748446bb
MG
1408
1409 BUG_ON(!PageBuddy(page));
1410
1411 zone = page_zone(page);
2e30abd1 1412 mt = get_pageblock_migratetype(page);
748446bb 1413
194159fb 1414 if (!is_migrate_isolate(mt)) {
2e30abd1
MS
1415 /* Obey watermarks as if the page was being allocated */
1416 watermark = low_wmark_pages(zone) + (1 << order);
1417 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1418 return 0;
1419
8fb74b9f 1420 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2e30abd1 1421 }
748446bb
MG
1422
1423 /* Remove page from free list */
1424 list_del(&page->lru);
1425 zone->free_area[order].nr_free--;
1426 rmv_page_order(page);
2139cbe6 1427
8fb74b9f 1428 /* Set the pageblock if the isolated page is at least a pageblock */
748446bb
MG
1429 if (order >= pageblock_order - 1) {
1430 struct page *endpage = page + (1 << order) - 1;
47118af0
MN
1431 for (; page < endpage; page += pageblock_nr_pages) {
1432 int mt = get_pageblock_migratetype(page);
194159fb 1433 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
47118af0
MN
1434 set_pageblock_migratetype(page,
1435 MIGRATE_MOVABLE);
1436 }
748446bb
MG
1437 }
1438
8fb74b9f 1439 return 1UL << order;
1fb3f8ca
MG
1440}
1441
1442/*
1443 * Similar to split_page except the page is already free. As this is only
1444 * being used for migration, the migratetype of the block also changes.
1445 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1447 * are enabled.
1448 *
1449 * Note: this is probably too low level an operation for use in drivers.
1450 * Please consult with lkml before using this in your driver.
1451 */
1452int split_free_page(struct page *page)
1453{
1454 unsigned int order;
1455 int nr_pages;
1456
1fb3f8ca
MG
1457 order = page_order(page);
1458
8fb74b9f 1459 nr_pages = __isolate_free_page(page, order);
1fb3f8ca
MG
1460 if (!nr_pages)
1461 return 0;
1462
1463 /* Split into individual pages */
1464 set_page_refcounted(page);
1465 split_page(page, order);
1466 return nr_pages;
748446bb
MG
1467}
1468
1da177e4
LT
1469/*
1470 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
1471 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1472 * or two.
1473 */
0a15c3e9
MG
1474static inline
1475struct page *buffered_rmqueue(struct zone *preferred_zone,
3dd28266
MG
1476 struct zone *zone, int order, gfp_t gfp_flags,
1477 int migratetype)
1da177e4
LT
1478{
1479 unsigned long flags;
689bcebf 1480 struct page *page;
1da177e4
LT
1481 int cold = !!(gfp_flags & __GFP_COLD);
1482
689bcebf 1483again:
48db57f8 1484 if (likely(order == 0)) {
1da177e4 1485 struct per_cpu_pages *pcp;
5f8dcc21 1486 struct list_head *list;
1da177e4 1487
1da177e4 1488 local_irq_save(flags);
99dcc3e5
CL
1489 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1490 list = &pcp->lists[migratetype];
5f8dcc21 1491 if (list_empty(list)) {
535131e6 1492 pcp->count += rmqueue_bulk(zone, 0,
5f8dcc21 1493 pcp->batch, list,
e084b2d9 1494 migratetype, cold);
5f8dcc21 1495 if (unlikely(list_empty(list)))
6fb332fa 1496 goto failed;
535131e6 1497 }
b92a6edd 1498
5f8dcc21
MG
1499 if (cold)
1500 page = list_entry(list->prev, struct page, lru);
1501 else
1502 page = list_entry(list->next, struct page, lru);
1503
b92a6edd
MG
1504 list_del(&page->lru);
1505 pcp->count--;
7fb1d9fc 1506 } else {
dab48dab
AM
1507 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1508 /*
1509 * __GFP_NOFAIL is not to be used in new code.
1510 *
1511 * All __GFP_NOFAIL callers should be fixed so that they
1512 * properly detect and handle allocation failures.
1513 *
1514 * We most definitely don't want callers attempting to
4923abf9 1515 * allocate greater than order-1 page units with
dab48dab
AM
1516 * __GFP_NOFAIL.
1517 */
4923abf9 1518 WARN_ON_ONCE(order > 1);
dab48dab 1519 }
1da177e4 1520 spin_lock_irqsave(&zone->lock, flags);
b2a0ac88 1521 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
1522 spin_unlock(&zone->lock);
1523 if (!page)
1524 goto failed;
d1ce749a
BZ
1525 __mod_zone_freepage_state(zone, -(1 << order),
1526 get_pageblock_migratetype(page));
1da177e4
LT
1527 }
1528
f8891e5e 1529 __count_zone_vm_events(PGALLOC, zone, 1 << order);
78afd561 1530 zone_statistics(preferred_zone, zone, gfp_flags);
a74609fa 1531 local_irq_restore(flags);
1da177e4 1532
725d704e 1533 VM_BUG_ON(bad_range(zone, page));
17cf4406 1534 if (prep_new_page(page, order, gfp_flags))
a74609fa 1535 goto again;
1da177e4 1536 return page;
a74609fa
NP
1537
1538failed:
1539 local_irq_restore(flags);
a74609fa 1540 return NULL;
1da177e4
LT
1541}
1542
933e312e
AM
1543#ifdef CONFIG_FAIL_PAGE_ALLOC
1544
b2588c4b 1545static struct {
933e312e
AM
1546 struct fault_attr attr;
1547
1548 u32 ignore_gfp_highmem;
1549 u32 ignore_gfp_wait;
54114994 1550 u32 min_order;
933e312e
AM
1551} fail_page_alloc = {
1552 .attr = FAULT_ATTR_INITIALIZER,
6b1b60f4
DM
1553 .ignore_gfp_wait = 1,
1554 .ignore_gfp_highmem = 1,
54114994 1555 .min_order = 1,
933e312e
AM
1556};
1557
1558static int __init setup_fail_page_alloc(char *str)
1559{
1560 return setup_fault_attr(&fail_page_alloc.attr, str);
1561}
1562__setup("fail_page_alloc=", setup_fail_page_alloc);
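/*
 * Editor's usage sketch (parameter order as described in
 * Documentation/fault-injection/fault-injection.txt): booting with
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * requests roughly a 10% failure probability on every eligible allocation
 * with no limit on the number of injected failures; min_order and the gfp
 * filters above can then be adjusted at runtime through debugfs.
 */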
1563
deaf386e 1564static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 1565{
54114994 1566 if (order < fail_page_alloc.min_order)
deaf386e 1567 return false;
933e312e 1568 if (gfp_mask & __GFP_NOFAIL)
deaf386e 1569 return false;
933e312e 1570 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
deaf386e 1571 return false;
933e312e 1572 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
deaf386e 1573 return false;
933e312e
AM
1574
1575 return should_fail(&fail_page_alloc.attr, 1 << order);
1576}
1577
1578#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1579
1580static int __init fail_page_alloc_debugfs(void)
1581{
f4ae40a6 1582 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
933e312e 1583 struct dentry *dir;
933e312e 1584
dd48c085
AM
1585 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1586 &fail_page_alloc.attr);
1587 if (IS_ERR(dir))
1588 return PTR_ERR(dir);
933e312e 1589
b2588c4b
AM
1590 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1591 &fail_page_alloc.ignore_gfp_wait))
1592 goto fail;
1593 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1594 &fail_page_alloc.ignore_gfp_highmem))
1595 goto fail;
1596 if (!debugfs_create_u32("min-order", mode, dir,
1597 &fail_page_alloc.min_order))
1598 goto fail;
1599
1600 return 0;
1601fail:
dd48c085 1602 debugfs_remove_recursive(dir);
933e312e 1603
b2588c4b 1604 return -ENOMEM;
933e312e
AM
1605}
1606
1607late_initcall(fail_page_alloc_debugfs);
1608
1609#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1610
1611#else /* CONFIG_FAIL_PAGE_ALLOC */
1612
deaf386e 1613static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 1614{
deaf386e 1615 return false;
933e312e
AM
1616}
1617
1618#endif /* CONFIG_FAIL_PAGE_ALLOC */
1619
1da177e4 1620/*
88f5acf8 1621 * Return true if free pages are above 'mark'. This takes into account the order
1da177e4
LT
1622 * of the allocation.
1623 */
88f5acf8
MG
1624static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1625 int classzone_idx, int alloc_flags, long free_pages)
1da177e4
LT
1626{
	/* free_pages may go negative - that's OK */
d23ad423 1628 long min = mark;
2cfed075 1629 long lowmem_reserve = z->lowmem_reserve[classzone_idx];
1da177e4
LT
1630 int o;
1631
df0a6daa 1632 free_pages -= (1 << order) - 1;
7fb1d9fc 1633 if (alloc_flags & ALLOC_HIGH)
1da177e4 1634 min -= min / 2;
7fb1d9fc 1635 if (alloc_flags & ALLOC_HARDER)
1da177e4 1636 min -= min / 4;
d95ea5d1
BZ
1637#ifdef CONFIG_CMA
1638 /* If allocation can't use CMA areas don't use free CMA pages */
1639 if (!(alloc_flags & ALLOC_CMA))
1640 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
1641#endif
2cfed075 1642 if (free_pages <= min + lowmem_reserve)
88f5acf8 1643 return false;
1da177e4
LT
1644 for (o = 0; o < order; o++) {
1645 /* At the next order, this order's pages become unavailable */
1646 free_pages -= z->free_area[o].nr_free << o;
1647
1648 /* Require fewer higher order pages to be free */
1649 min >>= 1;
1650
1651 if (free_pages <= min)
88f5acf8 1652 return false;
1da177e4 1653 }
1654 return true;
1655}
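/*
 * Editor's worked example (numbers invented for illustration): for an
 * order-2 request with mark = 128, no ALLOC_HIGH/ALLOC_HARDER and no
 * lowmem reserve, a zone with 200 free pages, of which 100 are free
 * order-0 pages and 25 are free order-1 blocks, passes as follows:
 *
 *	base:  free_pages = 200 - ((1 << 2) - 1) = 197 > 128
 *	o = 0: free_pages = 197 - 100 = 97,  min = 64,  97 > 64
 *	o = 1: free_pages = 97  - 50  = 47,  min = 32,  47 > 32
 *
 * so the watermark check reports true for the order-2 allocation.
 */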
1656
1657bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1658 int classzone_idx, int alloc_flags)
1659{
1660 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1661 zone_page_state(z, NR_FREE_PAGES));
1662}
1663
1664bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1665 int classzone_idx, int alloc_flags)
1666{
1667 long free_pages = zone_page_state(z, NR_FREE_PAGES);
1668
1669 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1670 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1671
1672 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1673 free_pages);
1da177e4
LT
1674}
1675
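/*
 * Illustrative sketch (not kernel code): a stand-alone model of the
 * order-by-order check in __zone_watermark_ok() above.  It drops the
 * ALLOC_HIGH/ALLOC_HARDER and CMA adjustments and assumes a plain array
 * of per-order nr_free counts; all names here are invented for the
 * example.  Returns non-zero when the request would be allowed.
 */
static int model_watermark_ok(long free_pages, long mark, long lowmem_reserve,
			      const unsigned long *nr_free, int order)
{
	long min = mark;
	int o;

	/* all but one page of the request is charged up front */
	free_pages -= (1L << order) - 1;
	if (free_pages <= min + lowmem_reserve)
		return 0;
	for (o = 0; o < order; o++) {
		/* pages of order 'o' cannot back an order-'order' request */
		free_pages -= (long)(nr_free[o] << o);
		/* demand progressively less headroom at higher orders */
		min >>= 1;
		if (free_pages <= min)
			return 0;
	}
	return 1;
}
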
9276b1bc
PJ
1676#ifdef CONFIG_NUMA
1677/*
1678 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1679 * skip over zones that are not allowed by the cpuset, or that have
1680 * been recently (in last second) found to be nearly full. See further
1681 * comments in mmzone.h. Reduces cache footprint of zonelist scans
183ff22b 1682 * that have to skip over a lot of full or unallowed zones.
9276b1bc
PJ
1683 *
1684 * If the zonelist cache is present in the passed in zonelist, then
1685 * returns a pointer to the allowed node mask (either the current
4b0ef1fe 1686 * tasks mems_allowed, or node_states[N_MEMORY].)
9276b1bc
PJ
1687 *
1688 * If the zonelist cache is not available for this zonelist, does
1689 * nothing and returns NULL.
1690 *
1691 * If the fullzones BITMAP in the zonelist cache is stale (more than
1692 * a second since last zap'd) then we zap it out (clear its bits.)
1693 *
1694 * We hold off even calling zlc_setup, until after we've checked the
1695 * first zone in the zonelist, on the theory that most allocations will
1696 * be satisfied from that first zone, so best to examine that zone as
1697 * quickly as we can.
1698 */
1699static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1700{
1701 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1702 nodemask_t *allowednodes; /* zonelist_cache approximation */
1703
1704 zlc = zonelist->zlcache_ptr;
1705 if (!zlc)
1706 return NULL;
1707
f05111f5 1708 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
9276b1bc
PJ
1709 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1710 zlc->last_full_zap = jiffies;
1711 }
1712
1713 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1714 &cpuset_current_mems_allowed :
4b0ef1fe 1715 &node_states[N_MEMORY];
9276b1bc
PJ
1716 return allowednodes;
1717}
1718
/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

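/*
 * Illustrative sketch (not kernel code): the zonelist cache reduces the
 * per-zone decision above to "is the zone's node allowed AND is its
 * 'full' bit clear".  A stand-alone model over plain bitmaps; all names
 * and sizes are invented for the example.
 */
#define MODEL_MAX_ZONES	64
#define MODEL_BITS	(8 * sizeof(unsigned long))

struct model_zlc {
	unsigned long fullzones[MODEL_MAX_ZONES / MODEL_BITS];
	int z_to_n[MODEL_MAX_ZONES];		/* zone index -> node id */
};

static int model_zone_worth_trying(const struct model_zlc *zlc, int zone_idx,
				   unsigned long allowed_nodes_mask)
{
	int node = zlc->z_to_n[zone_idx];
	int full = !!(zlc->fullzones[zone_idx / MODEL_BITS] &
		      (1UL << (zone_idx % MODEL_BITS)));

	return (allowed_nodes_mask & (1UL << node)) && !full;
}
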
1759/*
1760 * Given 'z' scanning a zonelist, set the corresponding bit in
1761 * zlc->fullzones, so that subsequent attempts to allocate a page
1762 * from that zone don't waste time re-examining it.
1763 */
dd1a239f 1764static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1765{
1766 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1767 int i; /* index of *z in zonelist zones */
1768
1769 zlc = zonelist->zlcache_ptr;
1770 if (!zlc)
1771 return;
1772
dd1a239f 1773 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1774
1775 set_bit(i, zlc->fullzones);
1776}
1777
76d3fbf8
MG
1778/*
1779 * clear all zones full, called after direct reclaim makes progress so that
1780 * a zone that was recently full is not skipped over for up to a second
1781 */
1782static void zlc_clear_zones_full(struct zonelist *zonelist)
1783{
1784 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1785
1786 zlc = zonelist->zlcache_ptr;
1787 if (!zlc)
1788 return;
1789
1790 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1791}
1792
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
}

static void __paginginit init_zone_allows_reclaim(int nid)
{
	int i;

	for_each_online_node(i)
		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
			node_set(i, NODE_DATA(nid)->reclaim_nodes);
		else
			zone_reclaim_mode = 1;
}

9276b1bc
PJ
1809#else /* CONFIG_NUMA */
1810
1811static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1812{
1813 return NULL;
1814}
1815
dd1a239f 1816static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1817 nodemask_t *allowednodes)
1818{
1819 return 1;
1820}
1821
dd1a239f 1822static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1823{
1824}
76d3fbf8
MG
1825
1826static void zlc_clear_zones_full(struct zonelist *zonelist)
1827{
1828}
957f822a
DR
1829
1830static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1831{
1832 return true;
1833}
1834
1835static inline void init_zone_allows_reclaim(int nid)
1836{
1837}
9276b1bc
PJ
1838#endif /* CONFIG_NUMA */
1839
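/*
 * Illustrative sketch (not kernel code): how init_zone_allows_reclaim()
 * above partitions nodes by distance.  The distance table, node count
 * and threshold are invented for the example; the real code uses
 * node_distance() and RECLAIM_DISTANCE.
 */
#define MODEL_NODES		4
#define MODEL_RECLAIM_DISTANCE	30

static const int model_distance[MODEL_NODES][MODEL_NODES] = {
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

/* Returns a bitmask of the nodes that node 'nid' may reclaim from. */
static unsigned int model_reclaim_nodes(int nid, int *enable_zone_reclaim)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < MODEL_NODES; i++) {
		if (model_distance[nid][i] <= MODEL_RECLAIM_DISTANCE)
			mask |= 1u << i;	/* close enough: allow reclaim */
		else
			*enable_zone_reclaim = 1;	/* any remote node turns zone_reclaim_mode on */
	}
	return mask;
}
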
7fb1d9fc 1840/*
0798e519 1841 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1842 * a page.
1843 */
1844static struct page *
19770b32 1845get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
5117f45d 1846 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
3dd28266 1847 struct zone *preferred_zone, int migratetype)
753ee728 1848{
dd1a239f 1849 struct zoneref *z;
7fb1d9fc 1850 struct page *page = NULL;
54a6eb5c 1851 int classzone_idx;
5117f45d 1852 struct zone *zone;
1853 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1854 int zlc_active = 0; /* set if using zonelist_cache */
1855 int did_zlc_setup = 0; /* just call zlc_setup() one time */
54a6eb5c 1856
19770b32 1857 classzone_idx = zone_idx(preferred_zone);
9276b1bc 1858zonelist_scan:
7fb1d9fc 1859 /*
9276b1bc 1860 * Scan zonelist, looking for a zone with enough free.
7fb1d9fc
RS
1861 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1862 */
1863 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1864 high_zoneidx, nodemask) {
e5adfffc 1865 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
1866 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1867 continue;
7fb1d9fc 1868 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1869 !cpuset_zone_allowed_softwall(zone, gfp_mask))
cd38b115 1870 continue;
a756cf59
JW
1871 /*
1872 * When allocating a page cache page for writing, we
1873 * want to get it from a zone that is within its dirty
1874 * limit, such that no single zone holds more than its
1875 * proportional share of globally allowed dirty pages.
1876 * The dirty limits take into account the zone's
1877 * lowmem reserves and high watermark so that kswapd
1878 * should be able to balance it without having to
1879 * write pages from its LRU list.
1880 *
1881 * This may look like it could increase pressure on
1882 * lower zones by failing allocations in higher zones
1883 * before they are full. But the pages that do spill
1884 * over are limited as the lower zones are protected
1885 * by this very same mechanism. It should not become
1886 * a practical burden to them.
1887 *
1888 * XXX: For now, allow allocations to potentially
1889 * exceed the per-zone dirty limit in the slowpath
1890 * (ALLOC_WMARK_LOW unset) before going into reclaim,
1891 * which is important when on a NUMA setup the allowed
1892 * zones are together not big enough to reach the
1893 * global limit. The proper fix for these situations
1894 * will require awareness of zones in the
1895 * dirty-throttling and the flusher threads.
1896 */
1897 if ((alloc_flags & ALLOC_WMARK_LOW) &&
1898 (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
1899 goto this_zone_full;
7fb1d9fc 1900
41858966 1901 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
7fb1d9fc 1902 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b 1903 unsigned long mark;
fa5e084e
MG
1904 int ret;
1905
41858966 1906 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
fa5e084e
MG
1907 if (zone_watermark_ok(zone, order, mark,
1908 classzone_idx, alloc_flags))
1909 goto try_this_zone;
1910
e5adfffc
KS
1911 if (IS_ENABLED(CONFIG_NUMA) &&
1912 !did_zlc_setup && nr_online_nodes > 1) {
cd38b115
MG
1913 /*
1914 * we do zlc_setup if there are multiple nodes
1915 * and before considering the first zone allowed
1916 * by the cpuset.
1917 */
1918 allowednodes = zlc_setup(zonelist, alloc_flags);
1919 zlc_active = 1;
1920 did_zlc_setup = 1;
1921 }
1922
957f822a
DR
1923 if (zone_reclaim_mode == 0 ||
1924 !zone_allows_reclaim(preferred_zone, zone))
fa5e084e
MG
1925 goto this_zone_full;
1926
cd38b115
MG
1927 /*
1928 * As we may have just activated ZLC, check if the first
1929 * eligible zone has failed zone_reclaim recently.
1930 */
e5adfffc 1931 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
cd38b115
MG
1932 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1933 continue;
1934
fa5e084e
MG
1935 ret = zone_reclaim(zone, gfp_mask, order);
1936 switch (ret) {
1937 case ZONE_RECLAIM_NOSCAN:
1938 /* did not scan */
cd38b115 1939 continue;
fa5e084e
MG
1940 case ZONE_RECLAIM_FULL:
1941 /* scanned but unreclaimable */
cd38b115 1942 continue;
fa5e084e
MG
1943 default:
1944 /* did we reclaim enough */
fed2719e 1945 if (zone_watermark_ok(zone, order, mark,
fa5e084e 1946 classzone_idx, alloc_flags))
fed2719e
MG
1947 goto try_this_zone;
1948
1949 /*
1950 * Failed to reclaim enough to meet watermark.
1951 * Only mark the zone full if checking the min
1952 * watermark or if we failed to reclaim just
1953 * 1<<order pages or else the page allocator
1954 * fastpath will prematurely mark zones full
1955 * when the watermark is between the low and
1956 * min watermarks.
1957 */
1958 if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
1959 ret == ZONE_RECLAIM_SOME)
9276b1bc 1960 goto this_zone_full;
fed2719e
MG
1961
1962 continue;
0798e519 1963 }
7fb1d9fc
RS
1964 }
1965
fa5e084e 1966try_this_zone:
3dd28266
MG
1967 page = buffered_rmqueue(preferred_zone, zone, order,
1968 gfp_mask, migratetype);
0798e519 1969 if (page)
7fb1d9fc 1970 break;
9276b1bc 1971this_zone_full:
e5adfffc 1972 if (IS_ENABLED(CONFIG_NUMA))
9276b1bc 1973 zlc_mark_zone_full(zonelist, z);
54a6eb5c 1974 }
9276b1bc 1975
e5adfffc 1976 if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
9276b1bc
PJ
1977 /* Disable zlc cache for second zonelist scan */
1978 zlc_active = 0;
1979 goto zonelist_scan;
1980 }
b121186a
AS
1981
1982 if (page)
1983 /*
1984 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
1985 * necessary to allocate the page. The expectation is
1986 * that the caller is taking steps that will free more
1987 * memory. The caller should avoid the page being used
1988 * for !PFMEMALLOC purposes.
1989 */
1990 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
1991
7fb1d9fc 1992 return page;
753ee728
MH
1993}
1994
/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
static inline bool should_suppress_show_mem(void)
{
	bool ret = false;

#if NODES_SHIFT > 8
	ret = in_interrupt();
#endif
	return ret;
}

static DEFINE_RATELIMIT_STATE(nopage_rs,
		DEFAULT_RATELIMIT_INTERVAL,
		DEFAULT_RATELIMIT_BURST);

void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
{
	unsigned int filter = SHOW_MEM_FILTER_NODES;

	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
	    debug_guardpage_minorder() > 0)
		return;

	/*
	 * Walking all memory to count page types is very expensive and should
	 * be inhibited in non-blockable contexts.
	 */
	if (!(gfp_mask & __GFP_WAIT))
		filter |= SHOW_MEM_FILTER_PAGE_COUNT;

	/*
	 * This documents exceptions given to allocations in certain
	 * contexts that are allowed to allocate outside current's set
	 * of allowed nodes.
	 */
	if (!(gfp_mask & __GFP_NOMEMALLOC))
		if (test_thread_flag(TIF_MEMDIE) ||
		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
			filter &= ~SHOW_MEM_FILTER_NODES;
	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
		filter &= ~SHOW_MEM_FILTER_NODES;

	if (fmt) {
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		pr_warn("%pV", &vaf);

		va_end(args);
	}

	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
		current->comm, order, gfp_mask);

	dump_stack();
	if (!should_suppress_show_mem())
		show_mem(filter);
}

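/*
 * Illustrative sketch (hypothetical caller, not part of this file's
 * logic): the same ratelimit pattern warn_alloc_failed() uses above.
 * The state name and message are invented for the example.
 */
static DEFINE_RATELIMIT_STATE(example_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void example_rare_warning(const char *what)
{
	/* At most DEFAULT_RATELIMIT_BURST messages per interval */
	if (!__ratelimit(&example_rs))
		return;
	pr_warn("example: %s\n", what);
}
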
static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
				unsigned long did_some_progress,
				unsigned long pages_reclaimed)
{
	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		return 0;

	/* Always retry if specifically requested */
	if (gfp_mask & __GFP_NOFAIL)
		return 1;

	/*
	 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
	 * making forward progress without invoking OOM. Suspend also disables
	 * storage devices so kswapd will not help. Bail if we are suspending.
	 */
	if (!did_some_progress && pm_suspended_storage())
		return 0;

	/*
	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
	 * means __GFP_NOFAIL, but that may not be true in other
	 * implementations.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return 1;

	/*
	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
	 * specified, then we retry until we no longer reclaim any pages
	 * (above), or we've reclaimed an order of pages at least as
	 * large as the allocation's order. In both cases, if the
	 * allocation still fails, we stop retrying.
	 */
	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
		return 1;

	return 0;
}

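/*
 * Illustrative sketch (not kernel code): the retry policy above as a
 * stand-alone predicate.  The M_* flag values and the costly-order
 * constant stand in for the real GFP bits and PAGE_ALLOC_COSTLY_ORDER;
 * they are invented for the example.
 */
#define M_NORETRY	0x1	/* stand-in for __GFP_NORETRY */
#define M_NOFAIL	0x2	/* stand-in for __GFP_NOFAIL */
#define M_REPEAT	0x4	/* stand-in for __GFP_REPEAT */
#define M_COSTLY_ORDER	3

static int model_should_retry(unsigned int flags, unsigned int order,
			      unsigned long did_some_progress,
			      unsigned long pages_reclaimed, int suspended)
{
	if (flags & M_NORETRY)
		return 0;
	if (flags & M_NOFAIL)
		return 1;
	if (!did_some_progress && suspended)
		return 0;
	if (order <= M_COSTLY_ORDER)
		return 1;
	if ((flags & M_REPEAT) && pages_reclaimed < (1UL << order))
		return 1;
	return 0;
}
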
11e33f6a
MG
2104static inline struct page *
2105__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2106 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
2107 nodemask_t *nodemask, struct zone *preferred_zone,
2108 int migratetype)
11e33f6a
MG
2109{
2110 struct page *page;
2111
2112 /* Acquire the OOM killer lock for the zones in zonelist */
ff321fea 2113 if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
11e33f6a 2114 schedule_timeout_uninterruptible(1);
1da177e4
LT
2115 return NULL;
2116 }
6b1de916 2117
11e33f6a
MG
2118 /*
2119 * Go through the zonelist yet one more time, keep very high watermark
2120 * here, this is only to catch a parallel oom killing, we must fail if
2121 * we're still under heavy pressure.
2122 */
2123 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
2124 order, zonelist, high_zoneidx,
5117f45d 2125 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
3dd28266 2126 preferred_zone, migratetype);
7fb1d9fc 2127 if (page)
11e33f6a
MG
2128 goto out;
2129
4365a567
KH
2130 if (!(gfp_mask & __GFP_NOFAIL)) {
2131 /* The OOM killer will not help higher order allocs */
2132 if (order > PAGE_ALLOC_COSTLY_ORDER)
2133 goto out;
03668b3c
DR
2134 /* The OOM killer does not needlessly kill tasks for lowmem */
2135 if (high_zoneidx < ZONE_NORMAL)
2136 goto out;
4365a567
KH
2137 /*
2138 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2139 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
2140 * The caller should handle page allocation failure by itself if
2141 * it specifies __GFP_THISNODE.
2142 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
2143 */
2144 if (gfp_mask & __GFP_THISNODE)
2145 goto out;
2146 }
11e33f6a 2147 /* Exhausted what can be done so it's blamo time */
08ab9b10 2148 out_of_memory(zonelist, gfp_mask, order, nodemask, false);
11e33f6a
MG
2149
2150out:
2151 clear_zonelist_oom(zonelist, gfp_mask);
2152 return page;
2153}
2154
56de7263
MG
2155#ifdef CONFIG_COMPACTION
2156/* Try memory compaction for high-order allocations before reclaim */
2157static struct page *
2158__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2159 struct zonelist *zonelist, enum zone_type high_zoneidx,
2160 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
66199712 2161 int migratetype, bool sync_migration,
c67fe375 2162 bool *contended_compaction, bool *deferred_compaction,
66199712 2163 unsigned long *did_some_progress)
56de7263 2164{
66199712 2165 if (!order)
56de7263
MG
2166 return NULL;
2167
aff62249 2168 if (compaction_deferred(preferred_zone, order)) {
66199712
MG
2169 *deferred_compaction = true;
2170 return NULL;
2171 }
2172
c06b1fca 2173 current->flags |= PF_MEMALLOC;
56de7263 2174 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
c67fe375 2175 nodemask, sync_migration,
8fb74b9f 2176 contended_compaction);
c06b1fca 2177 current->flags &= ~PF_MEMALLOC;
56de7263 2178
1fb3f8ca 2179 if (*did_some_progress != COMPACT_SKIPPED) {
8fb74b9f
MG
2180 struct page *page;
2181
56de7263
MG
2182 /* Page migration frees to the PCP lists but we want merging */
2183 drain_pages(get_cpu());
2184 put_cpu();
2185
2186 page = get_page_from_freelist(gfp_mask, nodemask,
2187 order, zonelist, high_zoneidx,
cfd19c5a
MG
2188 alloc_flags & ~ALLOC_NO_WATERMARKS,
2189 preferred_zone, migratetype);
56de7263 2190 if (page) {
62997027 2191 preferred_zone->compact_blockskip_flush = false;
4f92e258
MG
2192 preferred_zone->compact_considered = 0;
2193 preferred_zone->compact_defer_shift = 0;
aff62249
RR
2194 if (order >= preferred_zone->compact_order_failed)
2195 preferred_zone->compact_order_failed = order + 1;
56de7263
MG
2196 count_vm_event(COMPACTSUCCESS);
2197 return page;
2198 }
2199
2200 /*
2201 * It's bad if compaction run occurs and fails.
2202 * The most likely reason is that pages exist,
2203 * but not enough to satisfy watermarks.
2204 */
2205 count_vm_event(COMPACTFAIL);
66199712
MG
2206
2207 /*
2208 * As async compaction considers a subset of pageblocks, only
2209 * defer if the failure was a sync compaction failure.
2210 */
2211 if (sync_migration)
aff62249 2212 defer_compaction(preferred_zone, order);
56de7263
MG
2213
2214 cond_resched();
2215 }
2216
2217 return NULL;
2218}
2219#else
2220static inline struct page *
2221__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2222 struct zonelist *zonelist, enum zone_type high_zoneidx,
2223 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
66199712 2224 int migratetype, bool sync_migration,
c67fe375 2225 bool *contended_compaction, bool *deferred_compaction,
66199712 2226 unsigned long *did_some_progress)
56de7263
MG
2227{
2228 return NULL;
2229}
2230#endif /* CONFIG_COMPACTION */
2231
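/*
 * Illustrative sketch (not kernel code): __alloc_pages_direct_compact()
 * above resets the deferral counters on success and calls
 * defer_compaction() after a failed sync pass; those helpers live in
 * mm/compaction.c, not here.  This stand-alone model only shows the
 * exponential-backoff idea (skip up to 1 << defer_shift attempts after
 * a failure, reset on success); the field names and cap are assumptions
 * made for the example.
 */
struct model_compact_state {
	unsigned int considered;	/* attempts seen since the last failure */
	unsigned int defer_shift;	/* currently skipping 1 << defer_shift attempts */
};

#define MODEL_MAX_DEFER_SHIFT	6

static void model_compact_failed(struct model_compact_state *cs)
{
	cs->considered = 0;
	if (cs->defer_shift < MODEL_MAX_DEFER_SHIFT)
		cs->defer_shift++;		/* back off harder next time */
}

static void model_compact_succeeded(struct model_compact_state *cs)
{
	cs->considered = 0;
	cs->defer_shift = 0;			/* mirrors the reset in the code above */
}

/* Returns non-zero when this attempt should skip compaction entirely. */
static int model_compaction_deferred(struct model_compact_state *cs)
{
	unsigned long limit = 1UL << cs->defer_shift;

	if (++cs->considered > limit)
		cs->considered = limit;		/* avoid counter runaway */
	return cs->considered < limit;
}
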
bba90710
MS
2232/* Perform direct synchronous page reclaim */
2233static int
2234__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2235 nodemask_t *nodemask)
11e33f6a 2236{
11e33f6a 2237 struct reclaim_state reclaim_state;
bba90710 2238 int progress;
11e33f6a
MG
2239
2240 cond_resched();
2241
2242 /* We now go into synchronous reclaim */
2243 cpuset_memory_pressure_bump();
c06b1fca 2244 current->flags |= PF_MEMALLOC;
11e33f6a
MG
2245 lockdep_set_current_reclaim_state(gfp_mask);
2246 reclaim_state.reclaimed_slab = 0;
c06b1fca 2247 current->reclaim_state = &reclaim_state;
11e33f6a 2248
bba90710 2249 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
11e33f6a 2250
c06b1fca 2251 current->reclaim_state = NULL;
11e33f6a 2252 lockdep_clear_current_reclaim_state();
c06b1fca 2253 current->flags &= ~PF_MEMALLOC;
11e33f6a
MG
2254
2255 cond_resched();
2256
bba90710
MS
2257 return progress;
2258}
2259
2260/* The really slow allocator path where we enter direct reclaim */
2261static inline struct page *
2262__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2263 struct zonelist *zonelist, enum zone_type high_zoneidx,
2264 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2265 int migratetype, unsigned long *did_some_progress)
2266{
2267 struct page *page = NULL;
2268 bool drained = false;
2269
2270 *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
2271 nodemask);
9ee493ce
MG
2272 if (unlikely(!(*did_some_progress)))
2273 return NULL;
11e33f6a 2274
76d3fbf8 2275 /* After successful reclaim, reconsider all zones for allocation */
e5adfffc 2276 if (IS_ENABLED(CONFIG_NUMA))
76d3fbf8
MG
2277 zlc_clear_zones_full(zonelist);
2278
9ee493ce
MG
2279retry:
2280 page = get_page_from_freelist(gfp_mask, nodemask, order,
5117f45d 2281 zonelist, high_zoneidx,
cfd19c5a
MG
2282 alloc_flags & ~ALLOC_NO_WATERMARKS,
2283 preferred_zone, migratetype);
9ee493ce
MG
2284
2285 /*
2286 * If an allocation failed after direct reclaim, it could be because
2287 * pages are pinned on the per-cpu lists. Drain them and try again
2288 */
2289 if (!page && !drained) {
2290 drain_all_pages();
2291 drained = true;
2292 goto retry;
2293 }
2294
11e33f6a
MG
2295 return page;
2296}
2297
1da177e4 2298/*
11e33f6a
MG
2299 * This is called in the allocator slow-path if the allocation request is of
2300 * sufficient urgency to ignore watermarks and take other desperate measures
1da177e4 2301 */
11e33f6a
MG
2302static inline struct page *
2303__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2304 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
2305 nodemask_t *nodemask, struct zone *preferred_zone,
2306 int migratetype)
11e33f6a
MG
2307{
2308 struct page *page;
2309
2310 do {
2311 page = get_page_from_freelist(gfp_mask, nodemask, order,
5117f45d 2312 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
3dd28266 2313 preferred_zone, migratetype);
11e33f6a
MG
2314
2315 if (!page && gfp_mask & __GFP_NOFAIL)
0e093d99 2316 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
11e33f6a
MG
2317 } while (!page && (gfp_mask & __GFP_NOFAIL));
2318
2319 return page;
2320}
2321
2322static inline
2323void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
99504748
MG
2324 enum zone_type high_zoneidx,
2325 enum zone_type classzone_idx)
1da177e4 2326{
dd1a239f
MG
2327 struct zoneref *z;
2328 struct zone *zone;
1da177e4 2329
11e33f6a 2330 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
99504748 2331 wakeup_kswapd(zone, order, classzone_idx);
11e33f6a 2332}

static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
	const gfp_t wait = gfp_mask & __GFP_WAIT;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

	if (!wait) {
		/*
		 * Not worth trying to allocate harder for
		 * __GFP_NOMEMALLOC even if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (gfp_mask & __GFP_MEMALLOC)
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (!in_interrupt() &&
				((current->flags & PF_MEMALLOC) ||
				 unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}
#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}

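/*
 * Worked examples of the mapping above (derived by reading the function;
 * they ignore the PF_MEMALLOC/TIF_MEMDIE, realtime-task and CMA special
 * cases):
 *
 *   GFP_KERNEL (__GFP_WAIT set, __GFP_HIGH clear)
 *     -> ALLOC_WMARK_MIN | ALLOC_CPUSET
 *
 *   GFP_ATOMIC (__GFP_HIGH set, __GFP_WAIT clear)
 *     -> ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER
 *        (ALLOC_CPUSET is dropped because the caller cannot wait.)
 */
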
072bb0aa
MG
2383bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2384{
b37f1dd0 2385 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
072bb0aa
MG
2386}
2387
11e33f6a
MG
2388static inline struct page *
2389__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2390 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
2391 nodemask_t *nodemask, struct zone *preferred_zone,
2392 int migratetype)
11e33f6a
MG
2393{
2394 const gfp_t wait = gfp_mask & __GFP_WAIT;
2395 struct page *page = NULL;
2396 int alloc_flags;
2397 unsigned long pages_reclaimed = 0;
2398 unsigned long did_some_progress;
77f1fe6b 2399 bool sync_migration = false;
66199712 2400 bool deferred_compaction = false;
c67fe375 2401 bool contended_compaction = false;
1da177e4 2402
72807a74
MG
2403 /*
2404 * In the slowpath, we sanity check order to avoid ever trying to
2405 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2406 * be using allocators in order of preference for an area that is
2407 * too large.
2408 */
1fc28b70
MG
2409 if (order >= MAX_ORDER) {
2410 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 2411 return NULL;
1fc28b70 2412 }
1da177e4 2413
952f3b51
CL
2414 /*
2415 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2416 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2417 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2418 * using a larger set of nodes after it has established that the
2419 * allowed per node queues are empty and that nodes are
2420 * over allocated.
2421 */
e5adfffc
KS
2422 if (IS_ENABLED(CONFIG_NUMA) &&
2423 (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
952f3b51
CL
2424 goto nopage;
2425
cc4a6851 2426restart:
2427 if (!(gfp_mask & __GFP_NO_KSWAPD))
2428 wake_all_kswapd(order, zonelist, high_zoneidx,
2429 zone_idx(preferred_zone));
1da177e4 2430
9bf2229f 2431 /*
7fb1d9fc
RS
2432 * OK, we're below the kswapd watermark and have kicked background
2433 * reclaim. Now things get more complex, so set up alloc_flags according
2434 * to how we want to proceed.
9bf2229f 2435 */
341ce06f 2436 alloc_flags = gfp_to_alloc_flags(gfp_mask);
1da177e4 2437
f33261d7
DR
2438 /*
2439 * Find the true preferred zone if the allocation is unconstrained by
2440 * cpusets.
2441 */
2442 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2443 first_zones_zonelist(zonelist, high_zoneidx, NULL,
2444 &preferred_zone);
2445
cfa54a0f 2446rebalance:
341ce06f 2447 /* This is the last chance, in general, before the goto nopage. */
19770b32 2448 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
341ce06f
PZ
2449 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2450 preferred_zone, migratetype);
7fb1d9fc
RS
2451 if (page)
2452 goto got_pg;
1da177e4 2453
11e33f6a 2454 /* Allocate without watermarks if the context allows */
341ce06f 2455 if (alloc_flags & ALLOC_NO_WATERMARKS) {
183f6371
MG
2456 /*
2457 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2458 * the allocation is high priority and these type of
2459 * allocations are system rather than user orientated
2460 */
2461 zonelist = node_zonelist(numa_node_id(), gfp_mask);
2462
341ce06f
PZ
2463 page = __alloc_pages_high_priority(gfp_mask, order,
2464 zonelist, high_zoneidx, nodemask,
2465 preferred_zone, migratetype);
cfd19c5a 2466 if (page) {
341ce06f 2467 goto got_pg;
cfd19c5a 2468 }
1da177e4
LT
2469 }
2470
2471 /* Atomic allocations - we can't balance anything */
2472 if (!wait)
2473 goto nopage;
2474
341ce06f 2475 /* Avoid recursion of direct reclaim */
c06b1fca 2476 if (current->flags & PF_MEMALLOC)
341ce06f
PZ
2477 goto nopage;
2478
6583bb64
DR
2479 /* Avoid allocations with no watermarks from looping endlessly */
2480 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2481 goto nopage;
2482
77f1fe6b
MG
2483 /*
2484 * Try direct compaction. The first pass is asynchronous. Subsequent
2485 * attempts after direct reclaim are synchronous
2486 */
56de7263
MG
2487 page = __alloc_pages_direct_compact(gfp_mask, order,
2488 zonelist, high_zoneidx,
2489 nodemask,
2490 alloc_flags, preferred_zone,
66199712 2491 migratetype, sync_migration,
c67fe375 2492 &contended_compaction,
66199712
MG
2493 &deferred_compaction,
2494 &did_some_progress);
56de7263
MG
2495 if (page)
2496 goto got_pg;
c6a140bf 2497 sync_migration = true;
56de7263 2498
31f8d42d
LT
2499 /*
2500 * If compaction is deferred for high-order allocations, it is because
2501	 * sync compaction recently failed. If this is the case and the caller
2502 * requested a movable allocation that does not heavily disrupt the
2503 * system then fail the allocation instead of entering direct reclaim.
2504 */
2505 if ((deferred_compaction || contended_compaction) &&
caf49191 2506 (gfp_mask & __GFP_NO_KSWAPD))
31f8d42d 2507 goto nopage;
66199712 2508
11e33f6a
MG
2509 /* Try direct reclaim and then allocating */
2510 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2511 zonelist, high_zoneidx,
2512 nodemask,
5117f45d 2513 alloc_flags, preferred_zone,
3dd28266 2514 migratetype, &did_some_progress);
11e33f6a
MG
2515 if (page)
2516 goto got_pg;
1da177e4 2517
e33c3b5e 2518 /*
11e33f6a
MG
2519 * If we failed to make any progress reclaiming, then we are
2520 * running out of options and have to consider going OOM
e33c3b5e 2521 */
11e33f6a
MG
2522 if (!did_some_progress) {
2523 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
7f33d49a
RW
2524 if (oom_killer_disabled)
2525 goto nopage;
29fd66d2
DR
2526 /* Coredumps can quickly deplete all memory reserves */
2527 if ((current->flags & PF_DUMPCORE) &&
2528 !(gfp_mask & __GFP_NOFAIL))
2529 goto nopage;
11e33f6a
MG
2530 page = __alloc_pages_may_oom(gfp_mask, order,
2531 zonelist, high_zoneidx,
3dd28266
MG
2532 nodemask, preferred_zone,
2533 migratetype);
11e33f6a
MG
2534 if (page)
2535 goto got_pg;
1da177e4 2536
03668b3c
DR
2537 if (!(gfp_mask & __GFP_NOFAIL)) {
2538 /*
2539 * The oom killer is not called for high-order
2540 * allocations that may fail, so if no progress
2541 * is being made, there are no other options and
2542 * retrying is unlikely to help.
2543 */
2544 if (order > PAGE_ALLOC_COSTLY_ORDER)
2545 goto nopage;
2546 /*
2547 * The oom killer is not called for lowmem
2548 * allocations to prevent needlessly killing
2549 * innocent tasks.
2550 */
2551 if (high_zoneidx < ZONE_NORMAL)
2552 goto nopage;
2553 }
e2c55dc8 2554
ff0ceb9d
DR
2555 goto restart;
2556 }
1da177e4
LT
2557 }
2558
11e33f6a 2559 /* Check if we should retry the allocation */
a41f24ea 2560 pages_reclaimed += did_some_progress;
f90ac398
MG
2561 if (should_alloc_retry(gfp_mask, order, did_some_progress,
2562 pages_reclaimed)) {
11e33f6a 2563 /* Wait for some write requests to complete then retry */
0e093d99 2564 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
1da177e4 2565 goto rebalance;
3e7d3449
MG
2566 } else {
2567 /*
2568 * High-order allocations do not necessarily loop after
2569 * direct reclaim and reclaim/compaction depends on compaction
2570 * being called after reclaim so call directly if necessary
2571 */
2572 page = __alloc_pages_direct_compact(gfp_mask, order,
2573 zonelist, high_zoneidx,
2574 nodemask,
2575 alloc_flags, preferred_zone,
66199712 2576 migratetype, sync_migration,
c67fe375 2577 &contended_compaction,
66199712
MG
2578 &deferred_compaction,
2579 &did_some_progress);
3e7d3449
MG
2580 if (page)
2581 goto got_pg;
1da177e4
LT
2582 }
2583
2584nopage:
a238ab5b 2585 warn_alloc_failed(gfp_mask, order, NULL);
b1eeab67 2586 return page;
1da177e4 2587got_pg:
b1eeab67
VN
2588 if (kmemcheck_enabled)
2589 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
11e33f6a 2590
072bb0aa 2591 return page;
1da177e4 2592}
11e33f6a
MG
2593
2594/*
2595 * This is the 'heart' of the zoned buddy allocator.
2596 */
2597struct page *
2598__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2599 struct zonelist *zonelist, nodemask_t *nodemask)
2600{
2601 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
5117f45d 2602 struct zone *preferred_zone;
cc9a6c87 2603 struct page *page = NULL;
3dd28266 2604 int migratetype = allocflags_to_migratetype(gfp_mask);
cc9a6c87 2605 unsigned int cpuset_mems_cookie;
d95ea5d1 2606 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
6a1a0d3b 2607 struct mem_cgroup *memcg = NULL;
11e33f6a 2608
dcce284a
BH
2609 gfp_mask &= gfp_allowed_mask;
2610
11e33f6a
MG
2611 lockdep_trace_alloc(gfp_mask);
2612
2613 might_sleep_if(gfp_mask & __GFP_WAIT);
2614
2615 if (should_fail_alloc_page(gfp_mask, order))
2616 return NULL;
2617
2618 /*
2619 * Check the zones suitable for the gfp_mask contain at least one
2620 * valid zone. It's possible to have an empty zonelist as a result
2621 * of GFP_THISNODE and a memoryless node
2622 */
2623 if (unlikely(!zonelist->_zonerefs->zone))
2624 return NULL;
2625
6a1a0d3b
GC
2626 /*
2627 * Will only have any effect when __GFP_KMEMCG is set. This is
2628 * verified in the (always inline) callee
2629 */
2630 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
2631 return NULL;
2632
cc9a6c87
MG
2633retry_cpuset:
2634 cpuset_mems_cookie = get_mems_allowed();
2635
5117f45d 2636 /* The preferred zone is used for statistics later */
f33261d7
DR
2637 first_zones_zonelist(zonelist, high_zoneidx,
2638 nodemask ? : &cpuset_current_mems_allowed,
2639 &preferred_zone);
cc9a6c87
MG
2640 if (!preferred_zone)
2641 goto out;
5117f45d 2642
d95ea5d1
BZ
2643#ifdef CONFIG_CMA
2644 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2645 alloc_flags |= ALLOC_CMA;
2646#endif
5117f45d 2647 /* First allocation attempt */
11e33f6a 2648 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
d95ea5d1 2649 zonelist, high_zoneidx, alloc_flags,
3dd28266 2650 preferred_zone, migratetype);
21caf2fc
ML
2651 if (unlikely(!page)) {
2652 /*
2653 * Runtime PM, block IO and its error handling path
2654 * can deadlock because I/O on the device might not
2655 * complete.
2656 */
2657 gfp_mask = memalloc_noio_flags(gfp_mask);
11e33f6a 2658 page = __alloc_pages_slowpath(gfp_mask, order,
5117f45d 2659 zonelist, high_zoneidx, nodemask,
3dd28266 2660 preferred_zone, migratetype);
21caf2fc 2661 }
11e33f6a 2662
4b4f278c 2663 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
cc9a6c87
MG
2664
2665out:
2666 /*
2667 * When updating a task's mems_allowed, it is possible to race with
2668 * parallel threads in such a way that an allocation can fail while
2669 * the mask is being updated. If a page allocation is about to fail,
2670 * check if the cpuset changed during allocation and if so, retry.
2671 */
2672 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2673 goto retry_cpuset;
2674
6a1a0d3b
GC
2675 memcg_kmem_commit_charge(page, memcg, order);
2676
11e33f6a 2677 return page;
1da177e4 2678}
d239171e 2679EXPORT_SYMBOL(__alloc_pages_nodemask);

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_cold_page(page, 0);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);
2728
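/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): grab an order-1 block with __get_free_pages() and release it
 * with free_pages().  The function and variable names are invented for
 * the example.
 */
static int example_order1_user(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);

	if (!addr)
		return -ENOMEM;
	/* ... use the two contiguous pages at 'addr' ... */
	free_pages(addr, 1);
	return 0;
}
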
6a1a0d3b
GC
2729/*
2730 * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
2731 * pages allocated with __GFP_KMEMCG.
2732 *
2733 * Those pages are accounted to a particular memcg, embedded in the
2734 * corresponding page_cgroup. To avoid adding a hit in the allocator to search
2735 * for that information only to find out that it is NULL for users who have no
2736 * interest in that whatsoever, we provide these functions.
2737 *
2738 * The caller knows better which flags it relies on.
2739 */
2740void __free_memcg_kmem_pages(struct page *page, unsigned int order)
2741{
2742 memcg_kmem_uncharge_pages(page, order);
2743 __free_pages(page, order);
2744}
2745
2746void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
2747{
2748 if (addr != 0) {
2749 VM_BUG_ON(!virt_addr_valid((void *)addr));
2750 __free_memcg_kmem_pages(virt_to_page((void *)addr), order);
2751 }
2752}
2753
ee85c2e1
AK
2754static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2755{
2756 if (addr) {
2757 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2758 unsigned long used = addr + PAGE_ALIGN(size);
2759
2760 split_page(virt_to_page((void *)addr), order);
2761 while (used < alloc_end) {
2762 free_page(used);
2763 used += PAGE_SIZE;
2764 }
2765 }
2766 return (void *)addr;
2767}
2768
2be0ffe2
TT
2769/**
2770 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
2771 * @size: the number of bytes to allocate
2772 * @gfp_mask: GFP flags for the allocation
2773 *
2774 * This function is similar to alloc_pages(), except that it allocates the
2775 * minimum number of pages to satisfy the request. alloc_pages() can only
2776 * allocate memory in power-of-two pages.
2777 *
2778 * This function is also limited by MAX_ORDER.
2779 *
2780 * Memory allocated by this function must be released by free_pages_exact().
2781 */
2782void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2783{
2784 unsigned int order = get_order(size);
2785 unsigned long addr;
2786
2787 addr = __get_free_pages(gfp_mask, order);
ee85c2e1 2788 return make_alloc_exact(addr, order, size);
2be0ffe2
TT
2789}
2790EXPORT_SYMBOL(alloc_pages_exact);
2791
ee85c2e1
AK
2792/**
2793 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2794 * pages on a node.
b5e6ab58 2795 * @nid: the preferred node ID where memory should be allocated
ee85c2e1
AK
2796 * @size: the number of bytes to allocate
2797 * @gfp_mask: GFP flags for the allocation
2798 *
2799 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
2800 * back.
2801 * Note this is not alloc_pages_exact_node() which allocates on a specific node,
2802 * but is not exact.
2803 */
2804void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2805{
2806 unsigned order = get_order(size);
2807 struct page *p = alloc_pages_node(nid, gfp_mask, order);
2808 if (!p)
2809 return NULL;
2810 return make_alloc_exact((unsigned long)page_address(p), order, size);
2811}
2812EXPORT_SYMBOL(alloc_pages_exact_nid);
2813
2be0ffe2
TT
2814/**
2815 * free_pages_exact - release memory allocated via alloc_pages_exact()
2816 * @virt: the value returned by alloc_pages_exact.
2817 * @size: size of allocation, same value as passed to alloc_pages_exact().
2818 *
2819 * Release the memory allocated by a previous call to alloc_pages_exact.
2820 */
2821void free_pages_exact(void *virt, size_t size)
2822{
2823 unsigned long addr = (unsigned long)virt;
2824 unsigned long end = addr + PAGE_ALIGN(size);
2825
2826 while (addr < end) {
2827 free_page(addr);
2828 addr += PAGE_SIZE;
2829 }
2830}
2831EXPORT_SYMBOL(free_pages_exact);
2832
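/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): allocate an exact number of bytes of physically contiguous,
 * zeroed memory with alloc_pages_exact() and release it with the
 * matching free_pages_exact().  The size and names are invented for
 * the example.
 */
#define EXAMPLE_BUF_BYTES	(3 * PAGE_SIZE)

static void *example_buf;

static int example_buf_setup(void)
{
	example_buf = alloc_pages_exact(EXAMPLE_BUF_BYTES,
					GFP_KERNEL | __GFP_ZERO);
	if (!example_buf)
		return -ENOMEM;
	return 0;
}

static void example_buf_teardown(void)
{
	free_pages_exact(example_buf, EXAMPLE_BUF_BYTES);
	example_buf = NULL;
}
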
/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index. For each
 * zone, the number of pages is calculated as:
 *     present_pages - high_pages
 */
static unsigned long nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned long sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->managed_pages;
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

e0fb5815
ZY
2862/**
2863 * nr_free_buffer_pages - count number of pages beyond high watermark
2864 *
2865 * nr_free_buffer_pages() counts the number of pages which are beyond the high
2866 * watermark within ZONE_DMA and ZONE_NORMAL.
1da177e4 2867 */
ebec3862 2868unsigned long nr_free_buffer_pages(void)
1da177e4 2869{
af4ca457 2870 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 2871}
c2f1a551 2872EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4 2873
e0fb5815
ZY
2874/**
2875 * nr_free_pagecache_pages - count number of pages beyond high watermark
2876 *
2877 * nr_free_pagecache_pages() counts the number of pages which are beyond the
2878 * high watermark within all zones.
1da177e4 2879 */
ebec3862 2880unsigned long nr_free_pagecache_pages(void)
1da177e4 2881{
2a1e274a 2882 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 2883}
08e0f6a9
CL
2884
2885static inline void show_node(struct zone *zone)
1da177e4 2886{
e5adfffc 2887 if (IS_ENABLED(CONFIG_NUMA))
25ba77c1 2888 printk("Node %d ", zone_to_nid(zone));
1da177e4 2889}
1da177e4 2890
1da177e4
LT
2891void si_meminfo(struct sysinfo *val)
2892{
2893 val->totalram = totalram_pages;
2894 val->sharedram = 0;
d23ad423 2895 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 2896 val->bufferram = nr_blockdev_pages();
1da177e4
LT
2897 val->totalhigh = totalhigh_pages;
2898 val->freehigh = nr_free_highpages();
1da177e4
LT
2899 val->mem_unit = PAGE_SIZE;
2900}
2901
2902EXPORT_SYMBOL(si_meminfo);
2903
2904#ifdef CONFIG_NUMA
2905void si_meminfo_node(struct sysinfo *val, int nid)
2906{
2907 pg_data_t *pgdat = NODE_DATA(nid);
2908
2909 val->totalram = pgdat->node_present_pages;
d23ad423 2910 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 2911#ifdef CONFIG_HIGHMEM
b40da049 2912 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
d23ad423
CL
2913 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2914 NR_FREE_PAGES);
98d2b0eb
CL
2915#else
2916 val->totalhigh = 0;
2917 val->freehigh = 0;
2918#endif
1da177e4
LT
2919 val->mem_unit = PAGE_SIZE;
2920}
2921#endif
2922
ddd588b5 2923/*
7bf02ea2
DR
2924 * Determine whether the node should be displayed or not, depending on whether
2925 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
ddd588b5 2926 */
7bf02ea2 2927bool skip_free_areas_node(unsigned int flags, int nid)
ddd588b5
DR
2928{
2929 bool ret = false;
cc9a6c87 2930 unsigned int cpuset_mems_cookie;
ddd588b5
DR
2931
2932 if (!(flags & SHOW_MEM_FILTER_NODES))
2933 goto out;
2934
cc9a6c87
MG
2935 do {
2936 cpuset_mems_cookie = get_mems_allowed();
2937 ret = !node_isset(nid, cpuset_current_mems_allowed);
2938 } while (!put_mems_allowed(cpuset_mems_cookie));
ddd588b5
DR
2939out:
2940 return ret;
2941}
2942
1da177e4
LT
2943#define K(x) ((x) << (PAGE_SHIFT-10))
2944
377e4f16
RV
2945static void show_migration_types(unsigned char type)
2946{
2947 static const char types[MIGRATE_TYPES] = {
2948 [MIGRATE_UNMOVABLE] = 'U',
2949 [MIGRATE_RECLAIMABLE] = 'E',
2950 [MIGRATE_MOVABLE] = 'M',
2951 [MIGRATE_RESERVE] = 'R',
2952#ifdef CONFIG_CMA
2953 [MIGRATE_CMA] = 'C',
2954#endif
194159fb 2955#ifdef CONFIG_MEMORY_ISOLATION
377e4f16 2956 [MIGRATE_ISOLATE] = 'I',
194159fb 2957#endif
377e4f16
RV
2958 };
2959 char tmp[MIGRATE_TYPES + 1];
2960 char *p = tmp;
2961 int i;
2962
2963 for (i = 0; i < MIGRATE_TYPES; i++) {
2964 if (type & (1 << i))
2965 *p++ = types[i];
2966 }
2967
2968 *p = '\0';
2969 printk("(%s) ", tmp);
2970}
2971
1da177e4
LT
2972/*
2973 * Show free area list (used inside shift_scroll-lock stuff)
2974 * We also calculate the percentage fragmentation. We do this by counting the
2975 * memory on each free list with the exception of the first item on the list.
ddd588b5
DR
2976 * Suppresses nodes that are not allowed by current's cpuset if
2977 * SHOW_MEM_FILTER_NODES is passed.
1da177e4 2978 */
7bf02ea2 2979void show_free_areas(unsigned int filter)
1da177e4 2980{
c7241913 2981 int cpu;
1da177e4
LT
2982 struct zone *zone;
2983
ee99c71c 2984 for_each_populated_zone(zone) {
7bf02ea2 2985 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 2986 continue;
c7241913
JS
2987 show_node(zone);
2988 printk("%s per-cpu:\n", zone->name);
1da177e4 2989
6b482c67 2990 for_each_online_cpu(cpu) {
1da177e4
LT
2991 struct per_cpu_pageset *pageset;
2992
99dcc3e5 2993 pageset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 2994
3dfa5721
CL
2995 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2996 cpu, pageset->pcp.high,
2997 pageset->pcp.batch, pageset->pcp.count);
1da177e4
LT
2998 }
2999 }
3000
a731286d
KM
3001 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3002 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
7b854121 3003 " unevictable:%lu"
b76146ed 3004 " dirty:%lu writeback:%lu unstable:%lu\n"
3701b033 3005 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
d1ce749a
BZ
3006 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
3007 " free_cma:%lu\n",
4f98a2fe 3008 global_page_state(NR_ACTIVE_ANON),
4f98a2fe 3009 global_page_state(NR_INACTIVE_ANON),
a731286d
KM
3010 global_page_state(NR_ISOLATED_ANON),
3011 global_page_state(NR_ACTIVE_FILE),
4f98a2fe 3012 global_page_state(NR_INACTIVE_FILE),
a731286d 3013 global_page_state(NR_ISOLATED_FILE),
7b854121 3014 global_page_state(NR_UNEVICTABLE),
b1e7a8fd 3015 global_page_state(NR_FILE_DIRTY),
ce866b34 3016 global_page_state(NR_WRITEBACK),
fd39fc85 3017 global_page_state(NR_UNSTABLE_NFS),
d23ad423 3018 global_page_state(NR_FREE_PAGES),
3701b033
KM
3019 global_page_state(NR_SLAB_RECLAIMABLE),
3020 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 3021 global_page_state(NR_FILE_MAPPED),
4b02108a 3022 global_page_state(NR_SHMEM),
a25700a5 3023 global_page_state(NR_PAGETABLE),
d1ce749a
BZ
3024 global_page_state(NR_BOUNCE),
3025 global_page_state(NR_FREE_CMA_PAGES));
1da177e4 3026
ee99c71c 3027 for_each_populated_zone(zone) {
1da177e4
LT
3028 int i;
3029
7bf02ea2 3030 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 3031 continue;
1da177e4
LT
3032 show_node(zone);
3033 printk("%s"
3034 " free:%lukB"
3035 " min:%lukB"
3036 " low:%lukB"
3037 " high:%lukB"
4f98a2fe
RR
3038 " active_anon:%lukB"
3039 " inactive_anon:%lukB"
3040 " active_file:%lukB"
3041 " inactive_file:%lukB"
7b854121 3042 " unevictable:%lukB"
a731286d
KM
3043 " isolated(anon):%lukB"
3044 " isolated(file):%lukB"
1da177e4 3045 " present:%lukB"
9feedc9d 3046 " managed:%lukB"
4a0aa73f
KM
3047 " mlocked:%lukB"
3048 " dirty:%lukB"
3049 " writeback:%lukB"
3050 " mapped:%lukB"
4b02108a 3051 " shmem:%lukB"
4a0aa73f
KM
3052 " slab_reclaimable:%lukB"
3053 " slab_unreclaimable:%lukB"
c6a7f572 3054 " kernel_stack:%lukB"
4a0aa73f
KM
3055 " pagetables:%lukB"
3056 " unstable:%lukB"
3057 " bounce:%lukB"
d1ce749a 3058 " free_cma:%lukB"
4a0aa73f 3059 " writeback_tmp:%lukB"
1da177e4
LT
3060 " pages_scanned:%lu"
3061 " all_unreclaimable? %s"
3062 "\n",
3063 zone->name,
88f5acf8 3064 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
3065 K(min_wmark_pages(zone)),
3066 K(low_wmark_pages(zone)),
3067 K(high_wmark_pages(zone)),
4f98a2fe
RR
3068 K(zone_page_state(zone, NR_ACTIVE_ANON)),
3069 K(zone_page_state(zone, NR_INACTIVE_ANON)),
3070 K(zone_page_state(zone, NR_ACTIVE_FILE)),
3071 K(zone_page_state(zone, NR_INACTIVE_FILE)),
7b854121 3072 K(zone_page_state(zone, NR_UNEVICTABLE)),
a731286d
KM
3073 K(zone_page_state(zone, NR_ISOLATED_ANON)),
3074 K(zone_page_state(zone, NR_ISOLATED_FILE)),
1da177e4 3075 K(zone->present_pages),
9feedc9d 3076 K(zone->managed_pages),
4a0aa73f
KM
3077 K(zone_page_state(zone, NR_MLOCK)),
3078 K(zone_page_state(zone, NR_FILE_DIRTY)),
3079 K(zone_page_state(zone, NR_WRITEBACK)),
3080 K(zone_page_state(zone, NR_FILE_MAPPED)),
4b02108a 3081 K(zone_page_state(zone, NR_SHMEM)),
4a0aa73f
KM
3082 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3083 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
c6a7f572
KM
3084 zone_page_state(zone, NR_KERNEL_STACK) *
3085 THREAD_SIZE / 1024,
4a0aa73f
KM
3086 K(zone_page_state(zone, NR_PAGETABLE)),
3087 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3088 K(zone_page_state(zone, NR_BOUNCE)),
d1ce749a 3089 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
4a0aa73f 3090 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
1da177e4 3091 zone->pages_scanned,
93e4a89a 3092 (zone->all_unreclaimable ? "yes" : "no")
1da177e4
LT
3093 );
3094 printk("lowmem_reserve[]:");
3095 for (i = 0; i < MAX_NR_ZONES; i++)
3096 printk(" %lu", zone->lowmem_reserve[i]);
3097 printk("\n");
3098 }
3099
ee99c71c 3100 for_each_populated_zone(zone) {
8f9de51a 3101 unsigned long nr[MAX_ORDER], flags, order, total = 0;
377e4f16 3102 unsigned char types[MAX_ORDER];
1da177e4 3103
7bf02ea2 3104 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 3105 continue;
1da177e4
LT
3106 show_node(zone);
3107 printk("%s: ", zone->name);
1da177e4
LT
3108
3109 spin_lock_irqsave(&zone->lock, flags);
3110 for (order = 0; order < MAX_ORDER; order++) {
377e4f16
RV
3111 struct free_area *area = &zone->free_area[order];
3112 int type;
3113
3114 nr[order] = area->nr_free;
8f9de51a 3115 total += nr[order] << order;
377e4f16
RV
3116
3117 types[order] = 0;
3118 for (type = 0; type < MIGRATE_TYPES; type++) {
3119 if (!list_empty(&area->free_list[type]))
3120 types[order] |= 1 << type;
3121 }
1da177e4
LT
3122 }
3123 spin_unlock_irqrestore(&zone->lock, flags);
377e4f16 3124 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a 3125 printk("%lu*%lukB ", nr[order], K(1UL) << order);
377e4f16
RV
3126 if (nr[order])
3127 show_migration_types(types[order]);
3128 }
1da177e4
LT
3129 printk("= %lukB\n", K(total));
3130 }
3131
949f7ec5
DR
3132 hugetlb_show_meminfo();
3133
e6f3602d
LW
3134 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3135
1da177e4
LT
3136 show_swap_cache_info();
3137}
3138
19770b32
MG
3139static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3140{
3141 zoneref->zone = zone;
3142 zoneref->zone_idx = zone_idx(zone);
3143}
3144
1da177e4
LT
3145/*
3146 * Builds allocation fallback zone lists.
1a93205b
CL
3147 *
3148 * Add all populated zones of a node to the zonelist.
1da177e4 3149 */
f0c0b2b8
KH
3150static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3151 int nr_zones, enum zone_type zone_type)
1da177e4 3152{
1a93205b
CL
3153 struct zone *zone;
3154
98d2b0eb 3155 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 3156 zone_type++;
02a68a5e
CL
3157
3158 do {
2f6726e5 3159 zone_type--;
070f8032 3160 zone = pgdat->node_zones + zone_type;
1a93205b 3161 if (populated_zone(zone)) {
dd1a239f
MG
3162 zoneref_set_zone(zone,
3163 &zonelist->_zonerefs[nr_zones++]);
070f8032 3164 check_highest_zone(zone_type);
1da177e4 3165 }
02a68a5e 3166
2f6726e5 3167 } while (zone_type);
070f8032 3168 return nr_zones;
1da177e4
LT
3169}
3170
f0c0b2b8
KH
3171
3172/*
3173 * zonelist_order:
3174 * 0 = automatic detection of better ordering.
3175 * 1 = order by ([node] distance, -zonetype)
3176 * 2 = order by (-zonetype, [node] distance)
3177 *
3178 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3179 * the same zonelist. So only NUMA can configure this param.
3180 */
3181#define ZONELIST_ORDER_DEFAULT 0
3182#define ZONELIST_ORDER_NODE 1
3183#define ZONELIST_ORDER_ZONE 2
3184
3185/* zonelist order in the kernel.
3186 * set_zonelist_order() will set this to NODE or ZONE.
3187 */
3188static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3189static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3190
3191
1da177e4 3192#ifdef CONFIG_NUMA
f0c0b2b8
KH
3193/* The value user specified ....changed by config */
3194static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3195/* string for sysctl */
3196#define NUMA_ZONELIST_ORDER_LEN 16
3197char numa_zonelist_order[16] = "default";
3198
3199/*
3200 * interface for configure zonelist ordering.
3201 * command line option "numa_zonelist_order"
3202 * = "[dD]efault - default, automatic configuration.
3203 * = "[nN]ode - order by node locality, then by zone within node
3204 * = "[zZ]one - order by zone, then by locality within zone
3205 */
3206
3207static int __parse_numa_zonelist_order(char *s)
3208{
3209 if (*s == 'd' || *s == 'D') {
3210 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3211 } else if (*s == 'n' || *s == 'N') {
3212 user_zonelist_order = ZONELIST_ORDER_NODE;
3213 } else if (*s == 'z' || *s == 'Z') {
3214 user_zonelist_order = ZONELIST_ORDER_ZONE;
3215 } else {
3216 printk(KERN_WARNING
3217 "Ignoring invalid numa_zonelist_order value: "
3218 "%s\n", s);
3219 return -EINVAL;
3220 }
3221 return 0;
3222}
3223
3224static __init int setup_numa_zonelist_order(char *s)
3225{
ecb256f8
VL
3226 int ret;
3227
3228 if (!s)
3229 return 0;
3230
3231 ret = __parse_numa_zonelist_order(s);
3232 if (ret == 0)
3233 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3234
3235 return ret;
f0c0b2b8
KH
3236}
3237early_param("numa_zonelist_order", setup_numa_zonelist_order);
3238
3239/*
3240 * sysctl handler for numa_zonelist_order
3241 */
3242int numa_zonelist_order_handler(ctl_table *table, int write,
8d65af78 3243 void __user *buffer, size_t *length,
f0c0b2b8
KH
3244 loff_t *ppos)
3245{
3246 char saved_string[NUMA_ZONELIST_ORDER_LEN];
3247 int ret;
443c6f14 3248 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 3249
443c6f14 3250 mutex_lock(&zl_order_mutex);
f0c0b2b8 3251 if (write)
443c6f14 3252 strcpy(saved_string, (char*)table->data);
8d65af78 3253 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 3254 if (ret)
443c6f14 3255 goto out;
f0c0b2b8
KH
3256 if (write) {
3257 int oldval = user_zonelist_order;
3258 if (__parse_numa_zonelist_order((char*)table->data)) {
3259 /*
3260 * bogus value. restore saved string
3261 */
3262 strncpy((char*)table->data, saved_string,
3263 NUMA_ZONELIST_ORDER_LEN);
3264 user_zonelist_order = oldval;
4eaf3f64
HL
3265 } else if (oldval != user_zonelist_order) {
3266 mutex_lock(&zonelists_mutex);
9adb62a5 3267 build_all_zonelists(NULL, NULL);
4eaf3f64
HL
3268 mutex_unlock(&zonelists_mutex);
3269 }
f0c0b2b8 3270 }
443c6f14
AK
3271out:
3272 mutex_unlock(&zl_order_mutex);
3273 return ret;
f0c0b2b8
KH
3274}
3275
3276
62bc62a8 3277#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
3278static int node_load[MAX_NUMNODES];
3279
1da177e4 3280/**
4dc3b16b 3281 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
3282 * @node: node whose fallback list we're appending
3283 * @used_node_mask: nodemask_t of already used nodes
3284 *
3285 * We use a number of factors to determine which is the next node that should
3286 * appear on a given node's fallback list. The node should not have appeared
3287 * already in @node's fallback list, and it should be the next closest node
3288 * according to the distance array (which contains arbitrary distance values
3289 * from each node to each node in the system), and should also prefer nodes
3290 * with no CPUs, since presumably they'll have very little allocation pressure
3291 * on them otherwise.
3292 * It returns -1 if no node is found.
3293 */
f0c0b2b8 3294static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 3295{
4cf808eb 3296 int n, val;
1da177e4 3297 int min_val = INT_MAX;
00ef2d2f 3298 int best_node = NUMA_NO_NODE;
a70f7302 3299 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 3300
4cf808eb
LT
3301 /* Use the local node if we haven't already */
3302 if (!node_isset(node, *used_node_mask)) {
3303 node_set(node, *used_node_mask);
3304 return node;
3305 }
1da177e4 3306
4b0ef1fe 3307 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
3308
3309 /* Don't want a node to appear more than once */
3310 if (node_isset(n, *used_node_mask))
3311 continue;
3312
1da177e4
LT
3313 /* Use the distance array to find the distance */
3314 val = node_distance(node, n);
3315
4cf808eb
LT
3316 /* Penalize nodes under us ("prefer the next node") */
3317 val += (n < node);
3318
1da177e4 3319 /* Give preference to headless and unused nodes */
a70f7302
RR
3320 tmp = cpumask_of_node(n);
3321 if (!cpumask_empty(tmp))
1da177e4
LT
3322 val += PENALTY_FOR_NODE_WITH_CPUS;
3323
3324 /* Slight preference for less loaded node */
3325 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3326 val += node_load[n];
3327
3328 if (val < min_val) {
3329 min_val = val;
3330 best_node = n;
3331 }
3332 }
3333
3334 if (best_node >= 0)
3335 node_set(best_node, *used_node_mask);
3336
3337 return best_node;
3338}
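
/*
 * Standalone user-space sketch of the scoring performed by
 * find_next_best_node() above; it is an illustration only, not kernel code.
 * The distance matrix, the CPU-penalty value and the node layout are all
 * assumed for the example, and the node_load round-robin penalty is left at
 * zero here (build_zonelists() is what feeds it in the real allocator).
 */
#include <limits.h>
#include <stdio.h>

#define NODES 4
#define PENALTY_WITH_CPUS 1

static const int distance[NODES][NODES] = {	/* assumed SLIT-style distances */
	{10, 20, 20, 40},
	{20, 10, 40, 20},
	{20, 40, 10, 20},
	{40, 20, 20, 10},
};
static const int has_cpus[NODES] = {1, 1, 0, 0};	/* nodes 2,3 are memory-only */
static int node_load[NODES];				/* round-robin penalty, unused here */

static int next_best_node(int node, int *used)
{
	int n, best = -1, min_val = INT_MAX;

	for (n = 0; n < NODES; n++) {
		int val;

		if (used[n])
			continue;
		val = distance[node][n];
		val += (n < node);			/* prefer the next node */
		if (has_cpus[n])
			val += PENALTY_WITH_CPUS;	/* prefer headless nodes */
		val *= (NODES * NODES);			/* slight preference for less load */
		val += node_load[n];
		if (val < min_val) {
			min_val = val;
			best = n;
		}
	}
	if (best >= 0)
		used[best] = 1;
	return best;
}

int main(void)
{
	int used[NODES] = {0};
	int n, local = 0;

	used[local] = 1;				/* the local node always comes first */
	printf("fallback order for node %d: %d", local, local);
	while ((n = next_best_node(local, used)) >= 0)
		printf(" -> %d", n);
	printf("\n");
	return 0;
}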
3339
f0c0b2b8
KH
3340
3341/*
3342 * Build zonelists ordered by node and zones within node.
3343 * This results in maximum locality--normal zone overflows into local
3344 * DMA zone, if any--but risks exhausting DMA zone.
3345 */
3346static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 3347{
f0c0b2b8 3348 int j;
1da177e4 3349 struct zonelist *zonelist;
f0c0b2b8 3350
54a6eb5c 3351 zonelist = &pgdat->node_zonelists[0];
dd1a239f 3352 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c
MG
3353 ;
3354 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3355 MAX_NR_ZONES - 1);
dd1a239f
MG
3356 zonelist->_zonerefs[j].zone = NULL;
3357 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
3358}
3359
523b9458
CL
3360/*
3361 * Build gfp_thisnode zonelists
3362 */
3363static void build_thisnode_zonelists(pg_data_t *pgdat)
3364{
523b9458
CL
3365 int j;
3366 struct zonelist *zonelist;
3367
54a6eb5c
MG
3368 zonelist = &pgdat->node_zonelists[1];
3369 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
dd1a239f
MG
3370 zonelist->_zonerefs[j].zone = NULL;
3371 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
3372}
3373
f0c0b2b8
KH
3374/*
3375 * Build zonelists ordered by zone and nodes within zones.
3376 * This results in conserving DMA zone[s] until all Normal memory is
3377 * exhausted, but results in overflowing to a remote node while memory
3378 * may still exist in the local DMA zone.
3379 */
3380static int node_order[MAX_NUMNODES];
3381
3382static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3383{
f0c0b2b8
KH
3384 int pos, j, node;
3385 int zone_type; /* needs to be signed */
3386 struct zone *z;
3387 struct zonelist *zonelist;
3388
54a6eb5c
MG
3389 zonelist = &pgdat->node_zonelists[0];
3390 pos = 0;
3391 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3392 for (j = 0; j < nr_nodes; j++) {
3393 node = node_order[j];
3394 z = &NODE_DATA(node)->node_zones[zone_type];
3395 if (populated_zone(z)) {
dd1a239f
MG
3396 zoneref_set_zone(z,
3397 &zonelist->_zonerefs[pos++]);
54a6eb5c 3398 check_highest_zone(zone_type);
f0c0b2b8
KH
3399 }
3400 }
f0c0b2b8 3401 }
dd1a239f
MG
3402 zonelist->_zonerefs[pos].zone = NULL;
3403 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
3404}
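
/*
 * Standalone user-space sketch, not kernel code: it prints the fallback
 * order produced by the two policies described above for an assumed two-node
 * machine whose nodes each have a DMA and a Normal zone.
 */
#include <stdio.h>

int main(void)
{
	const char *zones[] = {"DMA", "Normal"};	/* lowest zone first */
	int node_order[] = {0, 1};			/* as found by find_next_best_node() */
	int nzones = 2, nnodes = 2, n, z;

	/* Node order: every zone of the closest node first (maximum locality). */
	printf("node order:");
	for (n = 0; n < nnodes; n++)
		for (z = nzones - 1; z >= 0; z--)
			printf(" node%d/%s", node_order[n], zones[z]);
	printf("\n");

	/* Zone order: all Normal zones before any DMA zone (conserves DMA). */
	printf("zone order:");
	for (z = nzones - 1; z >= 0; z--)
		for (n = 0; n < nnodes; n++)
			printf(" node%d/%s", node_order[n], zones[z]);
	printf("\n");
	return 0;
}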
3405
3406static int default_zonelist_order(void)
3407{
3408 int nid, zone_type;
3409 unsigned long low_kmem_size,total_size;
3410 struct zone *z;
3411 int average_size;
3412 /*
88393161 3413 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
f0c0b2b8
KH
3414 * If they are really small and used heavily, the system can fall
3415 * into OOM very easily.
e325c90f 3416 * This function detects ZONE_DMA/DMA32 size and configures zone order.
f0c0b2b8
KH
3417 */
3418 /* Is there ZONE_NORMAL? (e.g. ppc may have only a DMA zone) */
3419 low_kmem_size = 0;
3420 total_size = 0;
3421 for_each_online_node(nid) {
3422 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3423 z = &NODE_DATA(nid)->node_zones[zone_type];
3424 if (populated_zone(z)) {
3425 if (zone_type < ZONE_NORMAL)
3426 low_kmem_size += z->present_pages;
3427 total_size += z->present_pages;
e325c90f
DR
3428 } else if (zone_type == ZONE_NORMAL) {
3429 /*
3430 * If any node has only lowmem, then node order
3431 * is preferred to allow kernel allocations
3432 * locally; otherwise, they can easily infringe
3433 * on other nodes when there is an abundance of
3434 * lowmem available to allocate from.
3435 */
3436 return ZONELIST_ORDER_NODE;
f0c0b2b8
KH
3437 }
3438 }
3439 }
3440 if (!low_kmem_size || /* there is no DMA area. */
3441 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
3442 return ZONELIST_ORDER_NODE;
3443 /*
3444 * look into each node's config.
3445 * If there is a node whose DMA/DMA32 memory is a very large share of
3446 * its local memory, NODE_ORDER may be suitable.
3447 */
37b07e41 3448 average_size = total_size /
4b0ef1fe 3449 (nodes_weight(node_states[N_MEMORY]) + 1);
f0c0b2b8
KH
3450 for_each_online_node(nid) {
3451 low_kmem_size = 0;
3452 total_size = 0;
3453 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3454 z = &NODE_DATA(nid)->node_zones[zone_type];
3455 if (populated_zone(z)) {
3456 if (zone_type < ZONE_NORMAL)
3457 low_kmem_size += z->present_pages;
3458 total_size += z->present_pages;
3459 }
3460 }
3461 if (low_kmem_size &&
3462 total_size > average_size && /* ignore small node */
3463 low_kmem_size > total_size * 70/100)
3464 return ZONELIST_ORDER_NODE;
3465 }
3466 return ZONELIST_ORDER_ZONE;
3467}
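
/*
 * Standalone user-space sketch of default_zonelist_order() above, simplified:
 * the lowmem-only-node shortcut and the average-size filter are omitted.
 * "lowmem" stands for ZONE_DMA/DMA32 pages; all page counts are assumed
 * example values, not real machine data.
 */
#include <stdio.h>

int main(void)
{
	/* assumed per-node {lowmem, total} page counts */
	unsigned long node_low[]   = {1UL << 20, 0};
	unsigned long node_total[] = {4UL << 20, 4UL << 20};
	unsigned long low = 0, total = 0;
	int nid, order_is_node = 0;

	for (nid = 0; nid < 2; nid++) {
		low += node_low[nid];
		total += node_total[nid];
	}

	/* No DMA area at all, or DMA/DMA32 makes up more than half of memory. */
	if (!low || low > total / 2)
		order_is_node = 1;

	/* A node whose lowmem dominates its local memory also prefers node order. */
	for (nid = 0; nid < 2 && !order_is_node; nid++)
		if (node_low[nid] &&
		    node_low[nid] > node_total[nid] * 70 / 100)
			order_is_node = 1;

	printf("default zonelist order: %s\n", order_is_node ? "Node" : "Zone");
	return 0;
}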
3468
3469static void set_zonelist_order(void)
3470{
3471 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3472 current_zonelist_order = default_zonelist_order();
3473 else
3474 current_zonelist_order = user_zonelist_order;
3475}
3476
3477static void build_zonelists(pg_data_t *pgdat)
3478{
3479 int j, node, load;
3480 enum zone_type i;
1da177e4 3481 nodemask_t used_mask;
f0c0b2b8
KH
3482 int local_node, prev_node;
3483 struct zonelist *zonelist;
3484 int order = current_zonelist_order;
1da177e4
LT
3485
3486 /* initialize zonelists */
523b9458 3487 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 3488 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
3489 zonelist->_zonerefs[0].zone = NULL;
3490 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
3491 }
3492
3493 /* NUMA-aware ordering of nodes */
3494 local_node = pgdat->node_id;
62bc62a8 3495 load = nr_online_nodes;
1da177e4
LT
3496 prev_node = local_node;
3497 nodes_clear(used_mask);
f0c0b2b8 3498
f0c0b2b8
KH
3499 memset(node_order, 0, sizeof(node_order));
3500 j = 0;
3501
1da177e4
LT
3502 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3503 /*
3504 * We don't want to pressure a particular node.
3505 * So add a penalty to the first node in the same
3506 * distance group to make it round-robin.
3507 */
957f822a
DR
3508 if (node_distance(local_node, node) !=
3509 node_distance(local_node, prev_node))
f0c0b2b8
KH
3510 node_load[node] = load;
3511
1da177e4
LT
3512 prev_node = node;
3513 load--;
f0c0b2b8
KH
3514 if (order == ZONELIST_ORDER_NODE)
3515 build_zonelists_in_node_order(pgdat, node);
3516 else
3517 node_order[j++] = node; /* remember order */
3518 }
1da177e4 3519
f0c0b2b8
KH
3520 if (order == ZONELIST_ORDER_ZONE) {
3521 /* calculate node order -- i.e., DMA last! */
3522 build_zonelists_in_zone_order(pgdat, j);
1da177e4 3523 }
523b9458
CL
3524
3525 build_thisnode_zonelists(pgdat);
1da177e4
LT
3526}
3527
9276b1bc 3528/* Construct the zonelist performance cache - see mmzone.h for further details */
f0c0b2b8 3529static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 3530{
54a6eb5c
MG
3531 struct zonelist *zonelist;
3532 struct zonelist_cache *zlc;
dd1a239f 3533 struct zoneref *z;
9276b1bc 3534
54a6eb5c
MG
3535 zonelist = &pgdat->node_zonelists[0];
3536 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3537 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
dd1a239f
MG
3538 for (z = zonelist->_zonerefs; z->zone; z++)
3539 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
9276b1bc
PJ
3540}
3541
7aac7898
LS
3542#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3543/*
3544 * Return node id of node used for "local" allocations.
3545 * I.e., first node id of first zone in arg node's generic zonelist.
3546 * Used for initializing percpu 'numa_mem', which is used primarily
3547 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3548 */
3549int local_memory_node(int node)
3550{
3551 struct zone *zone;
3552
3553 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3554 gfp_zone(GFP_KERNEL),
3555 NULL,
3556 &zone);
3557 return zone->node;
3558}
3559#endif
f0c0b2b8 3560
1da177e4
LT
3561#else /* CONFIG_NUMA */
3562
f0c0b2b8
KH
3563static void set_zonelist_order(void)
3564{
3565 current_zonelist_order = ZONELIST_ORDER_ZONE;
3566}
3567
3568static void build_zonelists(pg_data_t *pgdat)
1da177e4 3569{
19655d34 3570 int node, local_node;
54a6eb5c
MG
3571 enum zone_type j;
3572 struct zonelist *zonelist;
1da177e4
LT
3573
3574 local_node = pgdat->node_id;
1da177e4 3575
54a6eb5c
MG
3576 zonelist = &pgdat->node_zonelists[0];
3577 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
1da177e4 3578
54a6eb5c
MG
3579 /*
3580 * Now we build the zonelist so that it contains the zones
3581 * of all the other nodes.
3582 * We don't want to pressure a particular node, so when
3583 * building the zones for node N, we make sure that the
3584 * zones coming right after the local ones are those from
3585 * node N+1 (modulo N)
3586 */
3587 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3588 if (!node_online(node))
3589 continue;
3590 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3591 MAX_NR_ZONES - 1);
1da177e4 3592 }
54a6eb5c
MG
3593 for (node = 0; node < local_node; node++) {
3594 if (!node_online(node))
3595 continue;
3596 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3597 MAX_NR_ZONES - 1);
3598 }
3599
dd1a239f
MG
3600 zonelist->_zonerefs[j].zone = NULL;
3601 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
3602}
3603
9276b1bc 3604/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 3605static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 3606{
54a6eb5c 3607 pgdat->node_zonelists[0].zlcache_ptr = NULL;
9276b1bc
PJ
3608}
3609
1da177e4
LT
3610#endif /* CONFIG_NUMA */
3611
99dcc3e5
CL
3612/*
3613 * Boot pageset table. One per cpu which is going to be used for all
3614 * zones and all nodes. The parameters will be set in such a way
3615 * that an item put on a list will immediately be handed over to
3616 * the buddy list. This is safe since pageset manipulation is done
3617 * with interrupts disabled.
3618 *
3619 * The boot_pagesets must be kept even after bootup is complete for
3620 * unused processors and/or zones. They do play a role for bootstrapping
3621 * hotplugged processors.
3622 *
3623 * zoneinfo_show() and maybe other functions do
3624 * not check if the processor is online before following the pageset pointer.
3625 * Other parts of the kernel may not check if the zone is available.
3626 */
3627static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3628static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 3629static void setup_zone_pageset(struct zone *zone);
99dcc3e5 3630
4eaf3f64
HL
3631/*
3632 * Global mutex to protect against size modification of zonelists
3633 * as well as to serialize pageset setup for the new populated zone.
3634 */
3635DEFINE_MUTEX(zonelists_mutex);
3636
9b1a4d38 3637/* The return type is int only to match the stop_machine() callback signature */
4ed7e022 3638static int __build_all_zonelists(void *data)
1da177e4 3639{
6811378e 3640 int nid;
99dcc3e5 3641 int cpu;
9adb62a5 3642 pg_data_t *self = data;
9276b1bc 3643
7f9cfb31
BL
3644#ifdef CONFIG_NUMA
3645 memset(node_load, 0, sizeof(node_load));
3646#endif
9adb62a5
JL
3647
3648 if (self && !node_online(self->node_id)) {
3649 build_zonelists(self);
3650 build_zonelist_cache(self);
3651 }
3652
9276b1bc 3653 for_each_online_node(nid) {
7ea1530a
CL
3654 pg_data_t *pgdat = NODE_DATA(nid);
3655
3656 build_zonelists(pgdat);
3657 build_zonelist_cache(pgdat);
9276b1bc 3658 }
99dcc3e5
CL
3659
3660 /*
3661 * Initialize the boot_pagesets that are going to be used
3662 * for bootstrapping processors. The real pagesets for
3663 * each zone will be allocated later when the per cpu
3664 * allocator is available.
3665 *
3666 * boot_pagesets are used also for bootstrapping offline
3667 * cpus if the system is already booted because the pagesets
3668 * are needed to initialize allocators on a specific cpu too.
3669 * E.g. the percpu allocator needs the page allocator which
3670 * needs the percpu allocator in order to allocate its pagesets
3671 * (a chicken-egg dilemma).
3672 */
7aac7898 3673 for_each_possible_cpu(cpu) {
99dcc3e5
CL
3674 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3675
7aac7898
LS
3676#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3677 /*
3678 * We now know the "local memory node" for each node--
3679 * i.e., the node of the first zone in the generic zonelist.
3680 * Set up numa_mem percpu variable for on-line cpus. During
3681 * boot, only the boot cpu should be on-line; we'll init the
3682 * secondary cpus' numa_mem as they come on-line. During
3683 * node/memory hotplug, we'll fixup all on-line cpus.
3684 */
3685 if (cpu_online(cpu))
3686 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3687#endif
3688 }
3689
6811378e
YG
3690 return 0;
3691}
3692
4eaf3f64
HL
3693/*
3694 * Called with zonelists_mutex held always
3695 * unless system_state == SYSTEM_BOOTING.
3696 */
9adb62a5 3697void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
6811378e 3698{
f0c0b2b8
KH
3699 set_zonelist_order();
3700
6811378e 3701 if (system_state == SYSTEM_BOOTING) {
423b41d7 3702 __build_all_zonelists(NULL);
68ad8df4 3703 mminit_verify_zonelist();
6811378e
YG
3704 cpuset_init_current_mems_allowed();
3705 } else {
183ff22b 3706 /* we have to stop all cpus to guarantee there is no user
6811378e 3707 of zonelist */
e9959f0f 3708#ifdef CONFIG_MEMORY_HOTPLUG
9adb62a5
JL
3709 if (zone)
3710 setup_zone_pageset(zone);
e9959f0f 3711#endif
9adb62a5 3712 stop_machine(__build_all_zonelists, pgdat, NULL);
6811378e
YG
3713 /* cpuset refresh routine should be here */
3714 }
bd1e22b8 3715 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
3716 /*
3717 * Disable grouping by mobility if the number of pages in the
3718 * system is too low to allow the mechanism to work. It would be
3719 * more accurate, but expensive to check per-zone. This check is
3720 * made on memory-hotadd so a system can start with mobility
3721 * disabled and enable it later
3722 */
d9c23400 3723 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
3724 page_group_by_mobility_disabled = 1;
3725 else
3726 page_group_by_mobility_disabled = 0;
3727
3728 printk("Built %i zonelists in %s order, mobility grouping %s. "
3729 "Total pages: %ld\n",
62bc62a8 3730 nr_online_nodes,
f0c0b2b8 3731 zonelist_order_name[current_zonelist_order],
9ef9acb0 3732 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
3733 vm_total_pages);
3734#ifdef CONFIG_NUMA
3735 printk("Policy zone: %s\n", zone_names[policy_zone]);
3736#endif
1da177e4
LT
3737}
3738
3739/*
3740 * Helper functions to size the waitqueue hash table.
3741 * Essentially these want to choose hash table sizes sufficiently
3742 * large so that collisions trying to wait on pages are rare.
3743 * But in fact, the number of active page waitqueues on typical
3744 * systems is ridiculously low, less than 200. So this ratio is
3745 * conservative, even though it seems large.
3746 *
3747 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3748 * waitqueues, i.e. the size of the waitq table given the number of pages.
3749 */
3750#define PAGES_PER_WAITQUEUE 256
3751
cca448fe 3752#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 3753static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
3754{
3755 unsigned long size = 1;
3756
3757 pages /= PAGES_PER_WAITQUEUE;
3758
3759 while (size < pages)
3760 size <<= 1;
3761
3762 /*
3763 * Once we have dozens or even hundreds of threads sleeping
3764 * on IO we've got bigger problems than wait queue collision.
3765 * Limit the size of the wait table to a reasonable size.
3766 */
3767 size = min(size, 4096UL);
3768
3769 return max(size, 4UL);
3770}
cca448fe
YG
3771#else
3772/*
3773 * A zone's size might be changed by hot-add, so it is not possible to determine
3774 * a suitable size for its wait_table. So we use the maximum size now.
3775 *
3776 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
3777 *
3778 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3779 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3780 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3781 *
3782 * The maximum number of entries is reached once a zone has (512K + 256) pages
3783 * or more under the traditional calculation above. That equals:
3784 *
3785 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3786 * ia64(16K page size) : = ( 8G + 4M)byte.
3787 * powerpc (64K page size) : = (32G +16M)byte.
3788 */
3789static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3790{
3791 return 4096UL;
3792}
3793#endif
1da177e4
LT
3794
3795/*
3796 * This is an integer logarithm so that shifts can be used later
3797 * to extract the more random high bits from the multiplicative
3798 * hash function before the remainder is taken.
3799 */
3800static inline unsigned long wait_table_bits(unsigned long size)
3801{
3802 return ffz(~size);
3803}
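
/*
 * Standalone user-space sketch of the wait-table sizing above, not kernel
 * code. The zone size is an assumed example (1GiB of 4KiB pages).
 */
#include <stdio.h>

#define SKETCH_PAGES_PER_WAITQUEUE 256

static unsigned long hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= SKETCH_PAGES_PER_WAITQUEUE;
	while (size < pages)
		size <<= 1;		/* round up to a power of two */
	if (size > 4096UL)
		size = 4096UL;		/* cap the table size */
	return size < 4UL ? 4UL : size;
}

static unsigned long table_bits(unsigned long size)
{
	unsigned long bits = 0;

	while (size > 1) {		/* log2 of the power-of-two size (ffz(~size)) */
		size >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	unsigned long pages = 262144;	/* assumed 1GiB zone with 4KiB pages */
	unsigned long n = hash_nr_entries(pages);

	printf("%lu pages -> %lu waitqueues (%lu hash bits)\n",
	       pages, n, table_bits(n));
	return 0;
}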
3804
3805#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3806
6d3163ce
AH
3807/*
3808 * Check if a pageblock contains reserved pages
3809 */
3810static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3811{
3812 unsigned long pfn;
3813
3814 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3815 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3816 return 1;
3817 }
3818 return 0;
3819}
3820
56fd56b8 3821/*
d9c23400 3822 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
41858966
MG
3823 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3824 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
56fd56b8
MG
3825 * higher will lead to a bigger reserve which will get freed as contiguous
3826 * blocks as reclaim kicks in
3827 */
3828static void setup_zone_migrate_reserve(struct zone *zone)
3829{
6d3163ce 3830 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
56fd56b8 3831 struct page *page;
78986a67
MG
3832 unsigned long block_migratetype;
3833 int reserve;
56fd56b8 3834
d0215638
MH
3835 /*
3836 * Get the start pfn, end pfn and the number of blocks to reserve
3837 * We have to be careful to be aligned to pageblock_nr_pages to
3838 * make sure that we always check pfn_valid for the first page in
3839 * the block.
3840 */
56fd56b8 3841 start_pfn = zone->zone_start_pfn;
108bcc96 3842 end_pfn = zone_end_pfn(zone);
d0215638 3843 start_pfn = roundup(start_pfn, pageblock_nr_pages);
41858966 3844 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
d9c23400 3845 pageblock_order;
56fd56b8 3846
78986a67
MG
3847 /*
3848 * Reserve blocks are generally in place to help high-order atomic
3849 * allocations that are short-lived. A min_free_kbytes value that
3850 * would result in more than 2 reserve blocks for atomic allocations
3851 * is assumed to be in place to help anti-fragmentation for the
3852 * future allocation of hugepages at runtime.
3853 */
3854 reserve = min(2, reserve);
3855
d9c23400 3856 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
56fd56b8
MG
3857 if (!pfn_valid(pfn))
3858 continue;
3859 page = pfn_to_page(pfn);
3860
344c790e
AL
3861 /* Watch out for overlapping nodes */
3862 if (page_to_nid(page) != zone_to_nid(zone))
3863 continue;
3864
56fd56b8
MG
3865 block_migratetype = get_pageblock_migratetype(page);
3866
938929f1
MG
3867 /* Only test what is necessary when the reserves are not met */
3868 if (reserve > 0) {
3869 /*
3870 * Blocks with reserved pages will never free, skip
3871 * them.
3872 */
3873 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3874 if (pageblock_is_reserved(pfn, block_end_pfn))
3875 continue;
56fd56b8 3876
938929f1
MG
3877 /* If this block is reserved, account for it */
3878 if (block_migratetype == MIGRATE_RESERVE) {
3879 reserve--;
3880 continue;
3881 }
3882
3883 /* Suitable for reserving if this block is movable */
3884 if (block_migratetype == MIGRATE_MOVABLE) {
3885 set_pageblock_migratetype(page,
3886 MIGRATE_RESERVE);
3887 move_freepages_block(zone, page,
3888 MIGRATE_RESERVE);
3889 reserve--;
3890 continue;
3891 }
56fd56b8
MG
3892 }
3893
3894 /*
3895 * If the reserve is met and this is a previous reserved block,
3896 * take it back
3897 */
3898 if (block_migratetype == MIGRATE_RESERVE) {
3899 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3900 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3901 }
3902 }
3903}
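
/*
 * Standalone user-space sketch, not kernel code: how many pageblocks end up
 * as MIGRATE_RESERVE for an assumed zone. The pageblock order and the min
 * watermark are example values (roughly x86 with 2MiB pageblocks).
 */
#include <stdio.h>

int main(void)
{
	unsigned long pageblock_order = 9;		/* 2MiB blocks of 4KiB pages */
	unsigned long pageblock_nr_pages = 1UL << pageblock_order;
	unsigned long min_wmark_pages = 1441;		/* assumed zone min watermark */
	unsigned long reserve;

	/* roundup(min_wmark_pages, pageblock_nr_pages) >> pageblock_order */
	reserve = (min_wmark_pages + pageblock_nr_pages - 1) / pageblock_nr_pages;

	if (reserve > 2)
		reserve = 2;	/* more than 2 blocks only serves anti-fragmentation */

	printf("MIGRATE_RESERVE pageblocks: %lu\n", reserve);
	return 0;
}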
ac0e5b7a 3904
1da177e4
LT
3905/*
3906 * Initially all pages are reserved - free ones are freed
3907 * up by free_all_bootmem() once the early boot process is
3908 * done. Non-atomic initialization, single-pass.
3909 */
c09b4240 3910void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 3911 unsigned long start_pfn, enum memmap_context context)
1da177e4 3912{
1da177e4 3913 struct page *page;
29751f69
AW
3914 unsigned long end_pfn = start_pfn + size;
3915 unsigned long pfn;
86051ca5 3916 struct zone *z;
1da177e4 3917
22b31eec
HD
3918 if (highest_memmap_pfn < end_pfn - 1)
3919 highest_memmap_pfn = end_pfn - 1;
3920
86051ca5 3921 z = &NODE_DATA(nid)->node_zones[zone];
cbe8dd4a 3922 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
3923 /*
3924 * There can be holes in boot-time mem_map[]s
3925 * handed to this function. They do not
3926 * exist on hotplugged memory.
3927 */
3928 if (context == MEMMAP_EARLY) {
3929 if (!early_pfn_valid(pfn))
3930 continue;
3931 if (!early_pfn_in_nid(pfn, nid))
3932 continue;
3933 }
d41dee36
AW
3934 page = pfn_to_page(pfn);
3935 set_page_links(page, zone, nid, pfn);
708614e6 3936 mminit_verify_page_links(page, zone, nid, pfn);
7835e98b 3937 init_page_count(page);
22b751c3
MG
3938 page_mapcount_reset(page);
3939 page_nid_reset_last(page);
1da177e4 3940 SetPageReserved(page);
b2a0ac88
MG
3941 /*
3942 * Mark the block movable so that blocks are reserved for
3943 * movable at startup. This will force kernel allocations
3944 * to reserve their blocks rather than leaking throughout
3945 * the address space during boot when many long-lived
56fd56b8
MG
3946 * kernel allocations are made. Later some blocks near
3947 * the start are marked MIGRATE_RESERVE by
3948 * setup_zone_migrate_reserve()
86051ca5
KH
3949 *
3950 * The bitmap is created for the zone's valid pfn range, but the memmap
3951 * can be created for invalid pages (for alignment).
3952 * Check here so that set_pageblock_migratetype() is not called on a
3953 * pfn outside the zone.
b2a0ac88 3954 */
86051ca5 3955 if ((z->zone_start_pfn <= pfn)
108bcc96 3956 && (pfn < zone_end_pfn(z))
86051ca5 3957 && !(pfn & (pageblock_nr_pages - 1)))
56fd56b8 3958 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
b2a0ac88 3959
1da177e4
LT
3960 INIT_LIST_HEAD(&page->lru);
3961#ifdef WANT_PAGE_VIRTUAL
3962 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3963 if (!is_highmem_idx(zone))
3212c6be 3964 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 3965#endif
1da177e4
LT
3966 }
3967}
3968
1e548deb 3969static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 3970{
b2a0ac88
MG
3971 int order, t;
3972 for_each_migratetype_order(order, t) {
3973 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
3974 zone->free_area[order].nr_free = 0;
3975 }
3976}
3977
3978#ifndef __HAVE_ARCH_MEMMAP_INIT
3979#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 3980 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
3981#endif
3982
4ed7e022 3983static int __meminit zone_batchsize(struct zone *zone)
e7c8d5c9 3984{
3a6be87f 3985#ifdef CONFIG_MMU
e7c8d5c9
CL
3986 int batch;
3987
3988 /*
3989 * The per-cpu-pages pools are set to around 1/1000th of the
ba56e91c 3990 * size of the zone, but no more than half a megabyte.
e7c8d5c9
CL
3991 *
3992 * OK, so we don't know how big the cache is. So guess.
3993 */
b40da049 3994 batch = zone->managed_pages / 1024;
ba56e91c
SR
3995 if (batch * PAGE_SIZE > 512 * 1024)
3996 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
3997 batch /= 4; /* We effectively *= 4 below */
3998 if (batch < 1)
3999 batch = 1;
4000
4001 /*
0ceaacc9
NP
4002 * Clamp the batch to a 2^n - 1 value. Having a power
4003 * of 2 value was found to be more likely to have
4004 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 4005 *
0ceaacc9
NP
4006 * For example if 2 tasks are alternately allocating
4007 * batches of pages, one task can end up with a lot
4008 * of pages of one half of the possible page colors
4009 * and the other with pages of the other colors.
e7c8d5c9 4010 */
9155203a 4011 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 4012
e7c8d5c9 4013 return batch;
3a6be87f
DH
4014
4015#else
4016 /* The deferral and batching of frees should be suppressed under NOMMU
4017 * conditions.
4018 *
4019 * The problem is that NOMMU needs to be able to allocate large chunks
4020 * of contiguous memory as there's no hardware page translation to
4021 * assemble apparent contiguous memory from discontiguous pages.
4022 *
4023 * Queueing large contiguous runs of pages for batching, however,
4024 * causes the pages to actually be freed in smaller chunks. As there
4025 * can be a significant delay between the individual batches being
4026 * recycled, this leads to the once large chunks of space being
4027 * fragmented and becoming unavailable for high-order allocations.
4028 */
4029 return 0;
4030#endif
e7c8d5c9
CL
4031}
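
/*
 * Standalone user-space sketch of the per-cpu batch calculation above plus
 * the pcp->high = 6 * batch rule from setup_pageset(); it is an illustration,
 * not kernel code, and the zone size and page size are assumed values.
 */
#include <stdio.h>

static unsigned long rounddown_pow2(unsigned long x)
{
	unsigned long p = 1;

	while (p * 2 <= x)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long managed_pages = 1UL << 20;	/* assumed 4GiB zone */
	unsigned long batch;

	batch = managed_pages / 1024;			/* ~1/1000th of the zone */
	if (batch * page_size > 512 * 1024)
		batch = (512 * 1024) / page_size;	/* but no more than 512KiB */
	batch /= 4;					/* scaled back up by pcp->high */
	if (batch < 1)
		batch = 1;
	batch = rounddown_pow2(batch + batch / 2) - 1;	/* avoid power-of-two aliasing */

	printf("pcp batch = %lu, pcp high = %lu\n", batch, 6 * batch);
	return 0;
}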
4032
b69a7288 4033static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2caaad41
CL
4034{
4035 struct per_cpu_pages *pcp;
5f8dcc21 4036 int migratetype;
2caaad41 4037
1c6fe946
MD
4038 memset(p, 0, sizeof(*p));
4039
3dfa5721 4040 pcp = &p->pcp;
2caaad41 4041 pcp->count = 0;
2caaad41
CL
4042 pcp->high = 6 * batch;
4043 pcp->batch = max(1UL, 1 * batch);
5f8dcc21
MG
4044 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4045 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
4046}
4047
8ad4b1fb
RS
4048/*
4049 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
4050 * to the value high for the pageset p.
4051 */
4052
4053static void setup_pagelist_highmark(struct per_cpu_pageset *p,
4054 unsigned long high)
4055{
4056 struct per_cpu_pages *pcp;
4057
3dfa5721 4058 pcp = &p->pcp;
8ad4b1fb
RS
4059 pcp->high = high;
4060 pcp->batch = max(1UL, high/4);
4061 if ((high/4) > (PAGE_SHIFT * 8))
4062 pcp->batch = PAGE_SHIFT * 8;
4063}
4064
4ed7e022 4065static void __meminit setup_zone_pageset(struct zone *zone)
319774e2
WF
4066{
4067 int cpu;
4068
4069 zone->pageset = alloc_percpu(struct per_cpu_pageset);
4070
4071 for_each_possible_cpu(cpu) {
4072 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4073
4074 setup_pageset(pcp, zone_batchsize(zone));
4075
4076 if (percpu_pagelist_fraction)
4077 setup_pagelist_highmark(pcp,
b40da049 4078 (zone->managed_pages /
319774e2
WF
4079 percpu_pagelist_fraction));
4080 }
4081}
4082
2caaad41 4083/*
99dcc3e5
CL
4084 * Allocate per cpu pagesets and initialize them.
4085 * Before this call only boot pagesets were available.
e7c8d5c9 4086 */
99dcc3e5 4087void __init setup_per_cpu_pageset(void)
e7c8d5c9 4088{
99dcc3e5 4089 struct zone *zone;
e7c8d5c9 4090
319774e2
WF
4091 for_each_populated_zone(zone)
4092 setup_zone_pageset(zone);
e7c8d5c9
CL
4093}
4094
577a32f6 4095static noinline __init_refok
cca448fe 4096int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
4097{
4098 int i;
4099 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 4100 size_t alloc_size;
ed8ece2e
DH
4101
4102 /*
4103 * The per-page waitqueue mechanism uses hashed waitqueues
4104 * per zone.
4105 */
02b694de
YG
4106 zone->wait_table_hash_nr_entries =
4107 wait_table_hash_nr_entries(zone_size_pages);
4108 zone->wait_table_bits =
4109 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
4110 alloc_size = zone->wait_table_hash_nr_entries
4111 * sizeof(wait_queue_head_t);
4112
cd94b9db 4113 if (!slab_is_available()) {
cca448fe 4114 zone->wait_table = (wait_queue_head_t *)
8f389a99 4115 alloc_bootmem_node_nopanic(pgdat, alloc_size);
cca448fe
YG
4116 } else {
4117 /*
4118 * This case means that a zone whose size was 0 gets new memory
4119 * via memory hot-add.
4120 * But it may be the case that a new node was hot-added. In
4121 * this case vmalloc() will not be able to use this new node's
4122 * memory - this wait_table must be initialized to use this new
4123 * node itself as well.
4124 * To use this new node's memory, further consideration will be
4125 * necessary.
4126 */
8691f3a7 4127 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
4128 }
4129 if (!zone->wait_table)
4130 return -ENOMEM;
ed8ece2e 4131
02b694de 4132 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 4133 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
4134
4135 return 0;
ed8ece2e
DH
4136}
4137
c09b4240 4138static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 4139{
99dcc3e5
CL
4140 /*
4141 * per cpu subsystem is not up at this point. The following code
4142 * relies on the ability of the linker to provide the
4143 * offset of a (static) per cpu variable into the per cpu area.
4144 */
4145 zone->pageset = &boot_pageset;
ed8ece2e 4146
f5335c0f 4147 if (zone->present_pages)
99dcc3e5
CL
4148 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
4149 zone->name, zone->present_pages,
4150 zone_batchsize(zone));
ed8ece2e
DH
4151}
4152
4ed7e022 4153int __meminit init_currently_empty_zone(struct zone *zone,
718127cc 4154 unsigned long zone_start_pfn,
a2f3aa02
DH
4155 unsigned long size,
4156 enum memmap_context context)
ed8ece2e
DH
4157{
4158 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
4159 int ret;
4160 ret = zone_wait_table_init(zone, size);
4161 if (ret)
4162 return ret;
ed8ece2e
DH
4163 pgdat->nr_zones = zone_idx(zone) + 1;
4164
ed8ece2e
DH
4165 zone->zone_start_pfn = zone_start_pfn;
4166
708614e6
MG
4167 mminit_dprintk(MMINIT_TRACE, "memmap_init",
4168 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4169 pgdat->node_id,
4170 (unsigned long)zone_idx(zone),
4171 zone_start_pfn, (zone_start_pfn + size));
4172
1e548deb 4173 zone_init_free_lists(zone);
718127cc
YG
4174
4175 return 0;
ed8ece2e
DH
4176}
4177
0ee332c1 4178#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d
MG
4179#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4180/*
4181 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4182 * Architectures may implement their own version but if add_active_range()
4183 * was used and there are no special requirements, this is a convenient
4184 * alternative
4185 */
f2dbcfa7 4186int __meminit __early_pfn_to_nid(unsigned long pfn)
c713216d 4187{
c13291a5
TH
4188 unsigned long start_pfn, end_pfn;
4189 int i, nid;
7c243c71
RA
4190 /*
4191 * NOTE: The following SMP-unsafe globals are only used early in boot
4192 * when the kernel is running single-threaded.
4193 */
4194 static unsigned long __meminitdata last_start_pfn, last_end_pfn;
4195 static int __meminitdata last_nid;
4196
4197 if (last_start_pfn <= pfn && pfn < last_end_pfn)
4198 return last_nid;
c713216d 4199
c13291a5 4200 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
7c243c71
RA
4201 if (start_pfn <= pfn && pfn < end_pfn) {
4202 last_start_pfn = start_pfn;
4203 last_end_pfn = end_pfn;
4204 last_nid = nid;
c13291a5 4205 return nid;
7c243c71 4206 }
cc2559bc
KH
4207 /* This is a memory hole */
4208 return -1;
c713216d
MG
4209}
4210#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4211
f2dbcfa7
KH
4212int __meminit early_pfn_to_nid(unsigned long pfn)
4213{
cc2559bc
KH
4214 int nid;
4215
4216 nid = __early_pfn_to_nid(pfn);
4217 if (nid >= 0)
4218 return nid;
4219 /* just returns 0 */
4220 return 0;
f2dbcfa7
KH
4221}
4222
cc2559bc
KH
4223#ifdef CONFIG_NODES_SPAN_OTHER_NODES
4224bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4225{
4226 int nid;
4227
4228 nid = __early_pfn_to_nid(pfn);
4229 if (nid >= 0 && nid != node)
4230 return false;
4231 return true;
4232}
4233#endif
f2dbcfa7 4234
c713216d
MG
4235/**
4236 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
4237 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4238 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
4239 *
4240 * If an architecture guarantees that all ranges registered with
4241 * add_active_ranges() contain no holes and may be freed,
4242 * this function may be used instead of calling free_bootmem() manually.
4243 */
c13291a5 4244void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
cc289894 4245{
c13291a5
TH
4246 unsigned long start_pfn, end_pfn;
4247 int i, this_nid;
edbe7d23 4248
c13291a5
TH
4249 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4250 start_pfn = min(start_pfn, max_low_pfn);
4251 end_pfn = min(end_pfn, max_low_pfn);
edbe7d23 4252
c13291a5
TH
4253 if (start_pfn < end_pfn)
4254 free_bootmem_node(NODE_DATA(this_nid),
4255 PFN_PHYS(start_pfn),
4256 (end_pfn - start_pfn) << PAGE_SHIFT);
edbe7d23 4257 }
edbe7d23 4258}
edbe7d23 4259
c713216d
MG
4260/**
4261 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 4262 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
4263 *
4264 * If an architecture guarantees that all ranges registered with
4265 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 4266 * function may be used instead of calling memory_present() manually.
c713216d
MG
4267 */
4268void __init sparse_memory_present_with_active_regions(int nid)
4269{
c13291a5
TH
4270 unsigned long start_pfn, end_pfn;
4271 int i, this_nid;
c713216d 4272
c13291a5
TH
4273 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4274 memory_present(this_nid, start_pfn, end_pfn);
c713216d
MG
4275}
4276
4277/**
4278 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
4279 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4280 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4281 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
4282 *
4283 * It returns the start and end page frame of a node based on information
4284 * provided by an arch calling add_active_range(). If called for a node
4285 * with no available memory, a warning is printed and the start and end
88ca3b94 4286 * PFNs will be 0.
c713216d 4287 */
a3142c8e 4288void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
4289 unsigned long *start_pfn, unsigned long *end_pfn)
4290{
c13291a5 4291 unsigned long this_start_pfn, this_end_pfn;
c713216d 4292 int i;
c13291a5 4293
c713216d
MG
4294 *start_pfn = -1UL;
4295 *end_pfn = 0;
4296
c13291a5
TH
4297 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4298 *start_pfn = min(*start_pfn, this_start_pfn);
4299 *end_pfn = max(*end_pfn, this_end_pfn);
c713216d
MG
4300 }
4301
633c0666 4302 if (*start_pfn == -1UL)
c713216d 4303 *start_pfn = 0;
c713216d
MG
4304}
4305
2a1e274a
MG
4306/*
4307 * This finds a zone that can be used for ZONE_MOVABLE pages. The
4308 * assumption is made that zones within a node are ordered in monotonic
4309 * increasing memory addresses so that the "highest" populated zone is used
4310 */
b69a7288 4311static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
4312{
4313 int zone_index;
4314 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4315 if (zone_index == ZONE_MOVABLE)
4316 continue;
4317
4318 if (arch_zone_highest_possible_pfn[zone_index] >
4319 arch_zone_lowest_possible_pfn[zone_index])
4320 break;
4321 }
4322
4323 VM_BUG_ON(zone_index == -1);
4324 movable_zone = zone_index;
4325}
4326
4327/*
4328 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
25985edc 4329 * because it is sized independently of architecture. Unlike the other zones,
2a1e274a
MG
4330 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4331 * in each node depending on the size of each node and how evenly kernelcore
4332 * is distributed. This helper function adjusts the zone ranges
4333 * provided by the architecture for a given node by using the end of the
4334 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4335 * zones within a node are in order of monotonically increasing memory addresses.
4336 */
b69a7288 4337static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
4338 unsigned long zone_type,
4339 unsigned long node_start_pfn,
4340 unsigned long node_end_pfn,
4341 unsigned long *zone_start_pfn,
4342 unsigned long *zone_end_pfn)
4343{
4344 /* Only adjust if ZONE_MOVABLE is on this node */
4345 if (zone_movable_pfn[nid]) {
4346 /* Size ZONE_MOVABLE */
4347 if (zone_type == ZONE_MOVABLE) {
4348 *zone_start_pfn = zone_movable_pfn[nid];
4349 *zone_end_pfn = min(node_end_pfn,
4350 arch_zone_highest_possible_pfn[movable_zone]);
4351
4352 /* Adjust for ZONE_MOVABLE starting within this range */
4353 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4354 *zone_end_pfn > zone_movable_pfn[nid]) {
4355 *zone_end_pfn = zone_movable_pfn[nid];
4356
4357 /* Check if this whole range is within ZONE_MOVABLE */
4358 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4359 *zone_start_pfn = *zone_end_pfn;
4360 }
4361}
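
/*
 * Standalone user-space sketch, not kernel code: the three adjustments made
 * above, for an assumed node spanning pfns 0-0x100000 whose ZONE_MOVABLE
 * boundary (zone_movable_pfn[nid]) was placed at pfn 0xc0000.
 */
#include <stdio.h>

int main(void)
{
	unsigned long node_end = 0x100000;
	unsigned long movable_start = 0xc0000;	/* assumed zone_movable_pfn[nid] */
	unsigned long start, end;

	/* Case 1: the zone being sized is ZONE_MOVABLE itself. */
	start = movable_start;
	end = node_end;
	printf("ZONE_MOVABLE:        %#lx-%#lx\n", start, end);

	/* Case 2: a kernel zone straddling the boundary is truncated at it. */
	start = 0x80000;
	end = node_end;
	if (start < movable_start && end > movable_start)
		end = movable_start;
	printf("straddling zone:     %#lx-%#lx\n", start, end);

	/* Case 3: a zone lying entirely above the boundary becomes empty. */
	start = 0xd0000;
	end = node_end;
	if (start >= movable_start)
		start = end;
	printf("zone above boundary: %#lx-%#lx (empty)\n", start, end);
	return 0;
}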
4362
c713216d
MG
4363/*
4364 * Return the number of pages a zone spans in a node, including holes
4365 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4366 */
6ea6e688 4367static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
4368 unsigned long zone_type,
4369 unsigned long *ignored)
4370{
4371 unsigned long node_start_pfn, node_end_pfn;
4372 unsigned long zone_start_pfn, zone_end_pfn;
4373
4374 /* Get the start and end of the node and zone */
4375 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4376 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4377 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
4378 adjust_zone_range_for_zone_movable(nid, zone_type,
4379 node_start_pfn, node_end_pfn,
4380 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
4381
4382 /* Check that this node has pages within the zone's required range */
4383 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4384 return 0;
4385
4386 /* Move the zone boundaries inside the node if necessary */
4387 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4388 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4389
4390 /* Return the spanned pages */
4391 return zone_end_pfn - zone_start_pfn;
4392}
4393
4394/*
4395 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 4396 * then all holes in the requested range will be accounted for.
c713216d 4397 */
32996250 4398unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
4399 unsigned long range_start_pfn,
4400 unsigned long range_end_pfn)
4401{
96e907d1
TH
4402 unsigned long nr_absent = range_end_pfn - range_start_pfn;
4403 unsigned long start_pfn, end_pfn;
4404 int i;
c713216d 4405
96e907d1
TH
4406 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4407 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4408 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4409 nr_absent -= end_pfn - start_pfn;
c713216d 4410 }
96e907d1 4411 return nr_absent;
c713216d
MG
4412}
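
/*
 * Standalone user-space sketch, not kernel code: the clamp-and-subtract hole
 * accounting above, applied to two assumed present-memory ranges inside a
 * requested pfn window.
 */
#include <stdio.h>

static unsigned long clampul(unsigned long v, unsigned long lo, unsigned long hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

int main(void)
{
	unsigned long range_start = 0x1000, range_end = 0x9000;
	/* assumed present-memory ranges registered for this node */
	unsigned long mem[2][2] = { {0x0000, 0x3000}, {0x5000, 0x8000} };
	unsigned long nr_absent = range_end - range_start;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned long s = clampul(mem[i][0], range_start, range_end);
		unsigned long e = clampul(mem[i][1], range_start, range_end);

		nr_absent -= e - s;	/* subtract the present part of each range */
	}
	printf("absent pfns in [%#lx, %#lx): %#lx\n",
	       range_start, range_end, nr_absent);
	return 0;
}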
4413
4414/**
4415 * absent_pages_in_range - Return number of page frames in holes within a range
4416 * @start_pfn: The start PFN to start searching for holes
4417 * @end_pfn: The end PFN to stop searching for holes
4418 *
88ca3b94 4419 * It returns the number of page frames in memory holes within a range.
c713216d
MG
4420 */
4421unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4422 unsigned long end_pfn)
4423{
4424 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4425}
4426
4427/* Return the number of page frames in holes in a zone on a node */
6ea6e688 4428static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
4429 unsigned long zone_type,
4430 unsigned long *ignored)
4431{
96e907d1
TH
4432 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4433 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
9c7cd687
MG
4434 unsigned long node_start_pfn, node_end_pfn;
4435 unsigned long zone_start_pfn, zone_end_pfn;
4436
4437 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
96e907d1
TH
4438 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4439 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
9c7cd687 4440
2a1e274a
MG
4441 adjust_zone_range_for_zone_movable(nid, zone_type,
4442 node_start_pfn, node_end_pfn,
4443 &zone_start_pfn, &zone_end_pfn);
9c7cd687 4444 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 4445}
0e0b864e 4446
0ee332c1 4447#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6ea6e688 4448static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
4449 unsigned long zone_type,
4450 unsigned long *zones_size)
4451{
4452 return zones_size[zone_type];
4453}
4454
6ea6e688 4455static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
4456 unsigned long zone_type,
4457 unsigned long *zholes_size)
4458{
4459 if (!zholes_size)
4460 return 0;
4461
4462 return zholes_size[zone_type];
4463}
20e6926d 4464
0ee332c1 4465#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 4466
a3142c8e 4467static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
4468 unsigned long *zones_size, unsigned long *zholes_size)
4469{
4470 unsigned long realtotalpages, totalpages = 0;
4471 enum zone_type i;
4472
4473 for (i = 0; i < MAX_NR_ZONES; i++)
4474 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4475 zones_size);
4476 pgdat->node_spanned_pages = totalpages;
4477
4478 realtotalpages = totalpages;
4479 for (i = 0; i < MAX_NR_ZONES; i++)
4480 realtotalpages -=
4481 zone_absent_pages_in_node(pgdat->node_id, i,
4482 zholes_size);
4483 pgdat->node_present_pages = realtotalpages;
4484 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4485 realtotalpages);
4486}
4487
835c134e
MG
4488#ifndef CONFIG_SPARSEMEM
4489/*
4490 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
4491 * Start by making sure zonesize is a multiple of pageblock_order by rounding
4492 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
4493 * round what is now in bits to nearest long in bits, then return it in
4494 * bytes.
4495 */
7c45512d 4496static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
835c134e
MG
4497{
4498 unsigned long usemapsize;
4499
7c45512d 4500 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
d9c23400
MG
4501 usemapsize = roundup(zonesize, pageblock_nr_pages);
4502 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
4503 usemapsize *= NR_PAGEBLOCK_BITS;
4504 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4505
4506 return usemapsize / 8;
4507}
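
/*
 * Standalone user-space sketch of the pageblock-flags bitmap sizing above,
 * not kernel code. NR_PAGEBLOCK_BITS is taken as 4 and the pageblock order
 * and zone geometry are assumed example values.
 */
#include <stdio.h>

static unsigned long roundupul(unsigned long x, unsigned long to)
{
	return ((x + to - 1) / to) * to;
}

int main(void)
{
	unsigned long pageblock_order = 9;
	unsigned long pageblock_nr_pages = 1UL << pageblock_order;
	unsigned long nr_pageblock_bits = 4;	/* assumed NR_PAGEBLOCK_BITS */
	unsigned long zone_start_pfn = 0x100;	/* deliberately not block aligned */
	unsigned long zonesize = 0x40000;	/* 1GiB of 4KiB pages */
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages - 1);
	usemapsize = roundupul(zonesize, pageblock_nr_pages) >> pageblock_order;
	usemapsize *= nr_pageblock_bits;
	usemapsize = roundupul(usemapsize, 8 * sizeof(unsigned long));

	printf("pageblock_flags bitmap: %lu bytes\n", usemapsize / 8);
	return 0;
}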
4508
4509static void __init setup_usemap(struct pglist_data *pgdat,
7c45512d
LT
4510 struct zone *zone,
4511 unsigned long zone_start_pfn,
4512 unsigned long zonesize)
835c134e 4513{
7c45512d 4514 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
835c134e 4515 zone->pageblock_flags = NULL;
58a01a45 4516 if (usemapsize)
8f389a99
YL
4517 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4518 usemapsize);
835c134e
MG
4519}
4520#else
7c45512d
LT
4521static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
4522 unsigned long zone_start_pfn, unsigned long zonesize) {}
835c134e
MG
4523#endif /* CONFIG_SPARSEMEM */
4524
d9c23400 4525#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c 4526
d9c23400 4527/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
ca57df79 4528void __init set_pageblock_order(void)
d9c23400 4529{
955c1cd7
AM
4530 unsigned int order;
4531
d9c23400
MG
4532 /* Check that pageblock_nr_pages has not already been setup */
4533 if (pageblock_order)
4534 return;
4535
955c1cd7
AM
4536 if (HPAGE_SHIFT > PAGE_SHIFT)
4537 order = HUGETLB_PAGE_ORDER;
4538 else
4539 order = MAX_ORDER - 1;
4540
d9c23400
MG
4541 /*
4542 * Assume the largest contiguous order of interest is a huge page.
955c1cd7
AM
4543 * This value may be variable depending on boot parameters on IA64 and
4544 * powerpc.
d9c23400
MG
4545 */
4546 pageblock_order = order;
4547}
4548#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4549
ba72cb8c
MG
4550/*
4551 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
955c1cd7
AM
4552 * is unused as pageblock_order is set at compile-time. See
4553 * include/linux/pageblock-flags.h for the values of pageblock_order based on
4554 * the kernel config
ba72cb8c 4555 */
ca57df79 4556void __init set_pageblock_order(void)
ba72cb8c 4557{
ba72cb8c 4558}
d9c23400
MG
4559
4560#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4561
01cefaef
JL
4562static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
4563 unsigned long present_pages)
4564{
4565 unsigned long pages = spanned_pages;
4566
4567 /*
4568 * Provide a more accurate estimation if there are holes within
4569 * the zone and SPARSEMEM is in use. If there are holes within the
4570 * zone, each populated memory region may cost us one or two extra
4571 * memmap pages due to alignment because memmap pages for each
4572 * populated region may not be naturally aligned on a page boundary.
4573 * So the (present_pages >> 4) heuristic is a tradeoff for that.
4574 */
4575 if (spanned_pages > present_pages + (present_pages >> 4) &&
4576 IS_ENABLED(CONFIG_SPARSEMEM))
4577 pages = present_pages;
4578
4579 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
4580}
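
/*
 * Standalone user-space sketch of the memmap sizing above, not kernel code.
 * sizeof(struct page) is taken as 64 bytes (a typical 64-bit figure) and the
 * zone geometry is an assumed example with holes in its span.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, sizeof_struct_page = 64;
	unsigned long spanned = 0x48000, present = 0x40000;	/* assumed pfn counts */
	unsigned long pages = spanned;
	unsigned long bytes, memmap_pages;

	/* With SPARSEMEM and a sparsely populated span, bill only present pages. */
	if (spanned > present + (present >> 4))
		pages = present;

	bytes = pages * sizeof_struct_page;
	memmap_pages = (bytes + page_size - 1) / page_size;	/* PAGE_ALIGN >> PAGE_SHIFT */

	printf("memmap consumes %lu pages for %lu pages of memory\n",
	       memmap_pages, pages);
	return 0;
}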
4581
1da177e4
LT
4582/*
4583 * Set up the zone data structures:
4584 * - mark all pages reserved
4585 * - mark all memory queues empty
4586 * - clear the memory bitmaps
6527af5d
MK
4587 *
4588 * NOTE: pgdat should get zeroed by caller.
1da177e4 4589 */
b5a0e011 4590static void __paginginit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
4591 unsigned long *zones_size, unsigned long *zholes_size)
4592{
2f1b6248 4593 enum zone_type j;
ed8ece2e 4594 int nid = pgdat->node_id;
1da177e4 4595 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 4596 int ret;
1da177e4 4597
208d54e5 4598 pgdat_resize_init(pgdat);
8177a420
AA
4599#ifdef CONFIG_NUMA_BALANCING
4600 spin_lock_init(&pgdat->numabalancing_migrate_lock);
4601 pgdat->numabalancing_migrate_nr_pages = 0;
4602 pgdat->numabalancing_migrate_next_window = jiffies;
4603#endif
1da177e4 4604 init_waitqueue_head(&pgdat->kswapd_wait);
5515061d 4605 init_waitqueue_head(&pgdat->pfmemalloc_wait);
52d4b9ac 4606 pgdat_page_cgroup_init(pgdat);
5f63b720 4607
1da177e4
LT
4608 for (j = 0; j < MAX_NR_ZONES; j++) {
4609 struct zone *zone = pgdat->node_zones + j;
9feedc9d 4610 unsigned long size, realsize, freesize, memmap_pages;
1da177e4 4611
c713216d 4612 size = zone_spanned_pages_in_node(nid, j, zones_size);
9feedc9d 4613 realsize = freesize = size - zone_absent_pages_in_node(nid, j,
c713216d 4614 zholes_size);
1da177e4 4615
0e0b864e 4616 /*
9feedc9d 4617 * Adjust freesize so that it accounts for how much memory
0e0b864e
MG
4618 * is used by this zone for memmap. This affects the watermark
4619 * and per-cpu initialisations
4620 */
01cefaef 4621 memmap_pages = calc_memmap_size(size, realsize);
9feedc9d
JL
4622 if (freesize >= memmap_pages) {
4623 freesize -= memmap_pages;
5594c8c8
YL
4624 if (memmap_pages)
4625 printk(KERN_DEBUG
4626 " %s zone: %lu pages used for memmap\n",
4627 zone_names[j], memmap_pages);
0e0b864e
MG
4628 } else
4629 printk(KERN_WARNING
9feedc9d
JL
4630 " %s zone: %lu pages exceeds freesize %lu\n",
4631 zone_names[j], memmap_pages, freesize);
0e0b864e 4632
6267276f 4633 /* Account for reserved pages */
9feedc9d
JL
4634 if (j == 0 && freesize > dma_reserve) {
4635 freesize -= dma_reserve;
d903ef9f 4636 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 4637 zone_names[0], dma_reserve);
0e0b864e
MG
4638 }
4639
98d2b0eb 4640 if (!is_highmem_idx(j))
9feedc9d 4641 nr_kernel_pages += freesize;
01cefaef
JL
4642 /* Charge for highmem memmap if there are enough kernel pages */
4643 else if (nr_kernel_pages > memmap_pages * 2)
4644 nr_kernel_pages -= memmap_pages;
9feedc9d 4645 nr_all_pages += freesize;
1da177e4
LT
4646
4647 zone->spanned_pages = size;
306f2e9e 4648 zone->present_pages = realsize;
9feedc9d
JL
4649 /*
4650 * Set an approximate value for lowmem here, it will be adjusted
4651 * when the bootmem allocator frees pages into the buddy system.
4652 * And all highmem pages will be managed by the buddy system.
4653 */
4654 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
9614634f 4655#ifdef CONFIG_NUMA
d5f541ed 4656 zone->node = nid;
9feedc9d 4657 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
9614634f 4658 / 100;
9feedc9d 4659 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
9614634f 4660#endif
1da177e4
LT
4661 zone->name = zone_names[j];
4662 spin_lock_init(&zone->lock);
4663 spin_lock_init(&zone->lru_lock);
bdc8cb98 4664 zone_seqlock_init(zone);
1da177e4 4665 zone->zone_pgdat = pgdat;
1da177e4 4666
ed8ece2e 4667 zone_pcp_init(zone);
bea8c150 4668 lruvec_init(&zone->lruvec);
1da177e4
LT
4669 if (!size)
4670 continue;
4671
955c1cd7 4672 set_pageblock_order();
7c45512d 4673 setup_usemap(pgdat, zone, zone_start_pfn, size);
a2f3aa02
DH
4674 ret = init_currently_empty_zone(zone, zone_start_pfn,
4675 size, MEMMAP_EARLY);
718127cc 4676 BUG_ON(ret);
76cdd58e 4677 memmap_init(size, nid, j, zone_start_pfn);
1da177e4 4678 zone_start_pfn += size;
1da177e4
LT
4679 }
4680}
4681
577a32f6 4682static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 4683{
1da177e4
LT
4684 /* Skip empty nodes */
4685 if (!pgdat->node_spanned_pages)
4686 return;
4687
d41dee36 4688#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
4689 /* ia64 gets its own node_mem_map, before this, without bootmem */
4690 if (!pgdat->node_mem_map) {
e984bb43 4691 unsigned long size, start, end;
d41dee36
AW
4692 struct page *map;
4693
e984bb43
BP
4694 /*
4695 * The zone's endpoints aren't required to be MAX_ORDER
4696 * aligned but the node_mem_map endpoints must be in order
4697 * for the buddy allocator to function correctly.
4698 */
4699 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
108bcc96 4700 end = pgdat_end_pfn(pgdat);
e984bb43
BP
4701 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4702 size = (end - start) * sizeof(struct page);
6f167ec7
DH
4703 map = alloc_remap(pgdat->node_id, size);
4704 if (!map)
8f389a99 4705 map = alloc_bootmem_node_nopanic(pgdat, size);
e984bb43 4706 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 4707 }
12d810c1 4708#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
4709 /*
4710 * With no DISCONTIG, the global mem_map is just set as node 0's
4711 */
c713216d 4712 if (pgdat == NODE_DATA(0)) {
1da177e4 4713 mem_map = NODE_DATA(0)->node_mem_map;
0ee332c1 4714#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d 4715 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
467bc461 4716 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
0ee332c1 4717#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 4718 }
1da177e4 4719#endif
d41dee36 4720#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
4721}
4722
9109fb7b
JW
4723void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4724 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 4725{
9109fb7b
JW
4726 pg_data_t *pgdat = NODE_DATA(nid);
4727
88fdf75d 4728 /* pg_data_t should be reset to zero when it's allocated */
8783b6e2 4729 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
88fdf75d 4730
1da177e4
LT
4731 pgdat->node_id = nid;
4732 pgdat->node_start_pfn = node_start_pfn;
957f822a 4733 init_zone_allows_reclaim(nid);
c713216d 4734 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
4735
4736 alloc_node_mem_map(pgdat);
e8c27ac9
YL
4737#ifdef CONFIG_FLAT_NODE_MEM_MAP
4738 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4739 nid, (unsigned long)pgdat,
4740 (unsigned long)pgdat->node_mem_map);
4741#endif
1da177e4
LT
4742
4743 free_area_init_core(pgdat, zones_size, zholes_size);
4744}
4745
0ee332c1 4746#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
418508c1
MS
4747
4748#if MAX_NUMNODES > 1
4749/*
4750 * Figure out the number of possible node ids.
4751 */
f9872caf 4752void __init setup_nr_node_ids(void)
418508c1
MS
4753{
4754 unsigned int node;
4755 unsigned int highest = 0;
4756
4757 for_each_node_mask(node, node_possible_map)
4758 highest = node;
4759 nr_node_ids = highest + 1;
4760}
418508c1
MS
4761#endif
4762
1e01979c
TH
4763/**
4764 * node_map_pfn_alignment - determine the maximum internode alignment
4765 *
4766 * This function should be called after node map is populated and sorted.
4767 * It calculates the maximum power of two alignment which can distinguish
4768 * all the nodes.
4769 *
4770 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4771 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
4772 * nodes are shifted by 256MiB, the result is 256MiB. Note that if only the last node is
4773 * shifted, 1GiB is enough and this function will indicate so.
4774 *
4775 * This is used to test whether pfn -> nid mapping of the chosen memory
4776 * model has fine enough granularity to avoid incorrect mapping for the
4777 * populated node map.
4778 *
4779 * Returns the determined alignment in pfn's. 0 if there is no alignment
4780 * requirement (single node).
4781 */
4782unsigned long __init node_map_pfn_alignment(void)
4783{
4784 unsigned long accl_mask = 0, last_end = 0;
c13291a5 4785 unsigned long start, end, mask;
1e01979c 4786 int last_nid = -1;
c13291a5 4787 int i, nid;
1e01979c 4788
c13291a5 4789 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1e01979c
TH
4790 if (!start || last_nid < 0 || last_nid == nid) {
4791 last_nid = nid;
4792 last_end = end;
4793 continue;
4794 }
4795
4796 /*
4797 * Start with a mask granular enough to pin-point to the
4798 * start pfn and tick off bits one-by-one until it becomes
4799 * too coarse to separate the current node from the last.
4800 */
4801 mask = ~((1 << __ffs(start)) - 1);
4802 while (mask && last_end <= (start & (mask << 1)))
4803 mask <<= 1;
4804
4805 /* accumulate all internode masks */
4806 accl_mask |= mask;
4807 }
4808
4809 /* convert mask to number of pages */
4810 return ~accl_mask + 1;
4811}
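To make the accumulation loop above concrete, here is a minimal, self-contained user-space sketch of the same mask arithmetic; the two-node layout (node 0 at 0, node 1 at 256MiB, 4KiB pages) and the use of __builtin_ctzl() in place of the kernel's __ffs() are illustration-only assumptions, not kernel code.

#include <stdio.h>

int main(void)
{
	/* node 0 covers [0, 256MiB), node 1 starts at 256MiB (4KiB pages) */
	unsigned long accl_mask = 0, last_end = 0x10000;
	unsigned long start = 0x10000;
	unsigned long mask;

	/* same arithmetic as node_map_pfn_alignment() for one node boundary */
	mask = ~((1UL << __builtin_ctzl(start)) - 1);
	while (mask && last_end <= (start & (mask << 1)))
		mask <<= 1;
	accl_mask |= mask;

	/* prints 0x10000 pages, i.e. 256MiB alignment */
	printf("alignment in pages: %#lx\n", ~accl_mask + 1);
	return 0;
}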
4812
a6af2bc3 4813/* Find the lowest pfn for a node */
b69a7288 4814static unsigned long __init find_min_pfn_for_node(int nid)
c713216d 4815{
a6af2bc3 4816 unsigned long min_pfn = ULONG_MAX;
c13291a5
TH
4817 unsigned long start_pfn;
4818 int i;
1abbfb41 4819
c13291a5
TH
4820 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
4821 min_pfn = min(min_pfn, start_pfn);
c713216d 4822
a6af2bc3
MG
4823 if (min_pfn == ULONG_MAX) {
4824 printk(KERN_WARNING
2bc0d261 4825 "Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
4826 return 0;
4827 }
4828
4829 return min_pfn;
c713216d
MG
4830}
4831
4832/**
4833 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4834 *
4835 * It returns the minimum PFN based on information provided via
88ca3b94 4836 * add_active_range().
c713216d
MG
4837 */
4838unsigned long __init find_min_pfn_with_active_regions(void)
4839{
4840 return find_min_pfn_for_node(MAX_NUMNODES);
4841}
4842
37b07e41
LS
4843/*
4844 * early_calculate_totalpages()
4845 * Sum pages in active regions for movable zone.
4b0ef1fe 4846 * Populate N_MEMORY for calculating usable_nodes.
37b07e41 4847 */
484f51f8 4848static unsigned long __init early_calculate_totalpages(void)
7e63efef 4849{
7e63efef 4850 unsigned long totalpages = 0;
c13291a5
TH
4851 unsigned long start_pfn, end_pfn;
4852 int i, nid;
4853
4854 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4855 unsigned long pages = end_pfn - start_pfn;
7e63efef 4856
37b07e41
LS
4857 totalpages += pages;
4858 if (pages)
4b0ef1fe 4859 node_set_state(nid, N_MEMORY);
37b07e41
LS
4860 }
4861 return totalpages;
7e63efef
MG
4862}
4863
2a1e274a
MG
4864/*
4865 * Find the PFN the Movable zone begins in each node. Kernel memory
4866 * is spread evenly between nodes as long as the nodes have enough
4867 * memory. When they don't, some nodes will have more kernelcore than
4868 * others
4869 */
b224ef85 4870static void __init find_zone_movable_pfns_for_nodes(void)
2a1e274a
MG
4871{
4872 int i, nid;
4873 unsigned long usable_startpfn;
4874 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd 4875 /* save the state before borrowing the nodemask */
4b0ef1fe 4876 nodemask_t saved_node_state = node_states[N_MEMORY];
37b07e41 4877 unsigned long totalpages = early_calculate_totalpages();
4b0ef1fe 4878 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
2a1e274a 4879
7e63efef
MG
4880 /*
4881 * If movablecore was specified, calculate the size of
4882 * kernelcore that corresponds to it so that memory usable for
4883 * any allocation type is evenly spread. If both kernelcore
4884 * and movablecore are specified, then the value of kernelcore
4885 * will be used for required_kernelcore if it's greater than
4886 * what movablecore would have allowed.
4887 */
4888 if (required_movablecore) {
7e63efef
MG
4889 unsigned long corepages;
4890
4891 /*
4892 * Round-up so that ZONE_MOVABLE is at least as large as what
4893 * was requested by the user
4894 */
4895 required_movablecore =
4896 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4897 corepages = totalpages - required_movablecore;
4898
4899 required_kernelcore = max(required_kernelcore, corepages);
4900 }
4901
20e6926d
YL
4902 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4903 if (!required_kernelcore)
66918dcd 4904 goto out;
2a1e274a
MG
4905
4906 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
20e6926d 4907 find_usable_zone_for_movable();
2a1e274a
MG
4908 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4909
4910restart:
4911 /* Spread kernelcore memory as evenly as possible throughout nodes */
4912 kernelcore_node = required_kernelcore / usable_nodes;
4b0ef1fe 4913 for_each_node_state(nid, N_MEMORY) {
c13291a5
TH
4914 unsigned long start_pfn, end_pfn;
4915
2a1e274a
MG
4916 /*
4917 * Recalculate kernelcore_node if the division per node
4918 * now exceeds what is necessary to satisfy the requested
4919 * amount of memory for the kernel
4920 */
4921 if (required_kernelcore < kernelcore_node)
4922 kernelcore_node = required_kernelcore / usable_nodes;
4923
4924 /*
4925 * As the map is walked, we track how much memory is usable
4926 * by the kernel using kernelcore_remaining. When it is
4927 * 0, the rest of the node is usable by ZONE_MOVABLE
4928 */
4929 kernelcore_remaining = kernelcore_node;
4930
4931 /* Go through each range of PFNs within this node */
c13291a5 4932 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2a1e274a
MG
4933 unsigned long size_pages;
4934
c13291a5 4935 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
2a1e274a
MG
4936 if (start_pfn >= end_pfn)
4937 continue;
4938
4939 /* Account for what is only usable for kernelcore */
4940 if (start_pfn < usable_startpfn) {
4941 unsigned long kernel_pages;
4942 kernel_pages = min(end_pfn, usable_startpfn)
4943 - start_pfn;
4944
4945 kernelcore_remaining -= min(kernel_pages,
4946 kernelcore_remaining);
4947 required_kernelcore -= min(kernel_pages,
4948 required_kernelcore);
4949
4950 /* Continue if range is now fully accounted */
4951 if (end_pfn <= usable_startpfn) {
4952
4953 /*
4954 * Push zone_movable_pfn to the end so
4955 * that if we have to rebalance
4956 * kernelcore across nodes, we will
4957 * not double account here
4958 */
4959 zone_movable_pfn[nid] = end_pfn;
4960 continue;
4961 }
4962 start_pfn = usable_startpfn;
4963 }
4964
4965 /*
4966 * The usable PFN range for ZONE_MOVABLE is from
4967 * start_pfn->end_pfn. Calculate size_pages as the
4968 * number of pages used as kernelcore
4969 */
4970 size_pages = end_pfn - start_pfn;
4971 if (size_pages > kernelcore_remaining)
4972 size_pages = kernelcore_remaining;
4973 zone_movable_pfn[nid] = start_pfn + size_pages;
4974
4975 /*
4976 * Some kernelcore has been met, update counts and
4977 * break if the kernelcore for this node has been
4978 * satisfied
4979 */
4980 required_kernelcore -= min(required_kernelcore,
4981 size_pages);
4982 kernelcore_remaining -= size_pages;
4983 if (!kernelcore_remaining)
4984 break;
4985 }
4986 }
4987
4988 /*
4989 * If there is still required_kernelcore, we do another pass with one
4990 * less node in the count. This will push zone_movable_pfn[nid] further
4991 * along on the nodes that still have memory until kernelcore is
4992 * satisfied
4993 */
4994 usable_nodes--;
4995 if (usable_nodes && required_kernelcore > usable_nodes)
4996 goto restart;
4997
4998 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4999 for (nid = 0; nid < MAX_NUMNODES; nid++)
5000 zone_movable_pfn[nid] =
5001 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd 5002
20e6926d 5003out:
66918dcd 5004 /* restore the node_state */
4b0ef1fe 5005 node_states[N_MEMORY] = saved_node_state;
2a1e274a
MG
5006}
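A short worked example of the spreading logic above; the machine layout and numbers are invented purely for illustration.

/*
 * Example: kernelcore=1G on a two-node machine with 4KiB pages.
 * required_kernelcore = 262144 pages and usable_nodes = 2, so each pass
 * tries to place kernelcore_node = 131072 pages (512MiB) of kernel memory
 * on every node with memory. Whatever a node cannot satisfy stays in
 * required_kernelcore, usable_nodes is decremented, and the "restart"
 * pass spreads the remainder over the nodes that still have room.
 */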
5007
4b0ef1fe
LJ
5008/* Any regular or high memory on that node? */
5009static void check_for_memory(pg_data_t *pgdat, int nid)
37b07e41 5010{
37b07e41
LS
5011 enum zone_type zone_type;
5012
4b0ef1fe
LJ
5013 if (N_MEMORY == N_NORMAL_MEMORY)
5014 return;
5015
5016 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
37b07e41 5017 struct zone *zone = &pgdat->node_zones[zone_type];
d0048b0e 5018 if (zone->present_pages) {
4b0ef1fe
LJ
5019 node_set_state(nid, N_HIGH_MEMORY);
5020 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5021 zone_type <= ZONE_NORMAL)
5022 node_set_state(nid, N_NORMAL_MEMORY);
d0048b0e
BL
5023 break;
5024 }
37b07e41 5025 }
37b07e41
LS
5026}
5027
c713216d
MG
5028/**
5029 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 5030 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
5031 *
5032 * This will call free_area_init_node() for each active node in the system.
5033 * Using the page ranges provided by add_active_range(), the size of each
5034 * zone in each node and their holes is calculated. If the maximum PFN
5035 * between two adjacent zones match, it is assumed that the zone is empty.
5036 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5037 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5038 * starts where the previous one ended. For example, ZONE_DMA32 starts
5039 * at arch_max_dma_pfn.
5040 */
5041void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5042{
c13291a5
TH
5043 unsigned long start_pfn, end_pfn;
5044 int i, nid;
a6af2bc3 5045
c713216d
MG
5046 /* Record where the zone boundaries are */
5047 memset(arch_zone_lowest_possible_pfn, 0,
5048 sizeof(arch_zone_lowest_possible_pfn));
5049 memset(arch_zone_highest_possible_pfn, 0,
5050 sizeof(arch_zone_highest_possible_pfn));
5051 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5052 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5053 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
5054 if (i == ZONE_MOVABLE)
5055 continue;
c713216d
MG
5056 arch_zone_lowest_possible_pfn[i] =
5057 arch_zone_highest_possible_pfn[i-1];
5058 arch_zone_highest_possible_pfn[i] =
5059 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5060 }
2a1e274a
MG
5061 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5062 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5063
5064 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
5065 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
b224ef85 5066 find_zone_movable_pfns_for_nodes();
c713216d 5067
c713216d 5068 /* Print out the zone ranges */
a62e2f4f 5069 printk("Zone ranges:\n");
2a1e274a
MG
5070 for (i = 0; i < MAX_NR_ZONES; i++) {
5071 if (i == ZONE_MOVABLE)
5072 continue;
155cbfc8 5073 printk(KERN_CONT " %-8s ", zone_names[i]);
72f0ba02
DR
5074 if (arch_zone_lowest_possible_pfn[i] ==
5075 arch_zone_highest_possible_pfn[i])
155cbfc8 5076 printk(KERN_CONT "empty\n");
72f0ba02 5077 else
a62e2f4f
BH
5078 printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
5079 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
5080 (arch_zone_highest_possible_pfn[i]
5081 << PAGE_SHIFT) - 1);
2a1e274a
MG
5082 }
5083
5084 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
a62e2f4f 5085 printk("Movable zone start for each node\n");
2a1e274a
MG
5086 for (i = 0; i < MAX_NUMNODES; i++) {
5087 if (zone_movable_pfn[i])
a62e2f4f
BH
5088 printk(" Node %d: %#010lx\n", i,
5089 zone_movable_pfn[i] << PAGE_SHIFT);
2a1e274a 5090 }
c713216d 5091
f2d52fe5 5092 /* Print out the early node map */
a62e2f4f 5093 printk("Early memory node ranges\n");
c13291a5 5094 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
a62e2f4f
BH
5095 printk(" node %3d: [mem %#010lx-%#010lx]\n", nid,
5096 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
c713216d
MG
5097
5098 /* Initialise every node */
708614e6 5099 mminit_verify_pageflags_layout();
8ef82866 5100 setup_nr_node_ids();
c713216d
MG
5101 for_each_online_node(nid) {
5102 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 5103 free_area_init_node(nid, NULL,
c713216d 5104 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
5105
5106 /* Any memory on that node */
5107 if (pgdat->node_present_pages)
4b0ef1fe
LJ
5108 node_set_state(nid, N_MEMORY);
5109 check_for_memory(pgdat, nid);
c713216d
MG
5110 }
5111}
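For orientation, a hedged sketch of how an architecture's init code typically fills max_zone_pfn before calling this function; the zone macros and the exact zones present vary per platform, so treat the names below as placeholders rather than a quote from any arch.

/*
 * Illustrative caller (simplified, x86-like):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA]    = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 *	free_area_init_nodes(max_zone_pfns);
 */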
2a1e274a 5112
7e63efef 5113static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
5114{
5115 unsigned long long coremem;
5116 if (!p)
5117 return -EINVAL;
5118
5119 coremem = memparse(p, &p);
7e63efef 5120 *core = coremem >> PAGE_SHIFT;
2a1e274a 5121
7e63efef 5122 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
5123 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5124
5125 return 0;
5126}
ed7ed365 5127
7e63efef
MG
5128/*
5129 * kernelcore=size sets the amount of memory for use for allocations that
5130 * cannot be reclaimed or migrated.
5131 */
5132static int __init cmdline_parse_kernelcore(char *p)
5133{
5134 return cmdline_parse_core(p, &required_kernelcore);
5135}
5136
5137/*
5138 * movablecore=size sets the amount of memory for use for allocations that
5139 * can be reclaimed or migrated.
5140 */
5141static int __init cmdline_parse_movablecore(char *p)
5142{
5143 return cmdline_parse_core(p, &required_movablecore);
5144}
5145
ed7ed365 5146early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 5147early_param("movablecore", cmdline_parse_movablecore);
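These handlers back the kernelcore= and movablecore= boot options; the command lines below use invented values purely for illustration.

/*
 * Example boot command lines (values are made up):
 *
 *	... kernelcore=512M	keep ~512MiB usable for unmovable kernel
 *				allocations, the rest becomes ZONE_MOVABLE
 *	... movablecore=2G	put ~2GiB into ZONE_MOVABLE and derive
 *				kernelcore from the remaining memory
 *
 * memparse() accepts the usual K/M/G suffixes, so "kernelcore=262144K"
 * is equivalent to "kernelcore=256M".
 */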
ed7ed365 5148
0ee332c1 5149#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5150
69afade7
JL
5151unsigned long free_reserved_area(unsigned long start, unsigned long end,
5152 int poison, char *s)
5153{
5154 unsigned long pages, pos;
5155
5156 pos = start = PAGE_ALIGN(start);
5157 end &= PAGE_MASK;
5158 for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
5159 if (poison)
5160 memset((void *)pos, poison, PAGE_SIZE);
bb3ec6b0 5161 free_reserved_page(virt_to_page((void *)pos));
69afade7
JL
5162 }
5163
5164 if (pages && s)
5165 pr_info("Freeing %s memory: %ldK (%lx - %lx)\n",
5166 s, pages << (PAGE_SHIFT - 10), start, end);
5167
5168 return pages;
5169}
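A hedged usage sketch for free_reserved_area(); the __init_begin/__init_end symbols and the zero poison value are illustrative stand-ins, actual callers live in arch code and generic helpers.

/*
 * Sketch of a typical caller (illustrative only):
 *
 *	free_reserved_area((unsigned long)&__init_begin,
 *			   (unsigned long)&__init_end,
 *			   0, "unused kernel");
 *
 * This walks the page-aligned range, optionally poisons each page, hands
 * it back to the buddy allocator via free_reserved_page(), and logs the
 * amount released.
 */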
5170
cfa11e08
JL
5171#ifdef CONFIG_HIGHMEM
5172void free_highmem_page(struct page *page)
5173{
5174 __free_reserved_page(page);
5175 totalram_pages++;
5176 totalhigh_pages++;
5177}
5178#endif
5179
0e0b864e 5180/**
88ca3b94
RD
5181 * set_dma_reserve - set the specified number of pages reserved in the first zone
5182 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
5183 *
5184 * The per-cpu batchsize and zone watermarks are determined by present_pages.
5185 * In the DMA zone, a significant percentage may be consumed by kernel image
5186 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
5187 * function may optionally be used to account for unfreeable pages in the
5188 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5189 * smaller per-cpu batchsize.
0e0b864e
MG
5190 */
5191void __init set_dma_reserve(unsigned long new_dma_reserve)
5192{
5193 dma_reserve = new_dma_reserve;
5194}
5195
1da177e4
LT
5196void __init free_area_init(unsigned long *zones_size)
5197{
9109fb7b 5198 free_area_init_node(0, zones_size,
1da177e4
LT
5199 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5200}
1da177e4 5201
1da177e4
LT
5202static int page_alloc_cpu_notify(struct notifier_block *self,
5203 unsigned long action, void *hcpu)
5204{
5205 int cpu = (unsigned long)hcpu;
1da177e4 5206
8bb78442 5207 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
f0cb3c76 5208 lru_add_drain_cpu(cpu);
9f8f2172
CL
5209 drain_pages(cpu);
5210
5211 /*
5212 * Spill the event counters of the dead processor
5213 * into the current processors event counters.
5214 * This artificially elevates the count of the current
5215 * processor.
5216 */
f8891e5e 5217 vm_events_fold_cpu(cpu);
9f8f2172
CL
5218
5219 /*
5220 * Zero the differential counters of the dead processor
5221 * so that the vm statistics are consistent.
5222 *
5223 * This is only okay since the processor is dead and cannot
5224 * race with what we are doing.
5225 */
2244b95a 5226 refresh_cpu_vm_stats(cpu);
1da177e4
LT
5227 }
5228 return NOTIFY_OK;
5229}
1da177e4
LT
5230
5231void __init page_alloc_init(void)
5232{
5233 hotcpu_notifier(page_alloc_cpu_notify, 0);
5234}
5235
cb45b0e9
HA
5236/*
5237 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5238 * or min_free_kbytes changes.
5239 */
5240static void calculate_totalreserve_pages(void)
5241{
5242 struct pglist_data *pgdat;
5243 unsigned long reserve_pages = 0;
2f6726e5 5244 enum zone_type i, j;
cb45b0e9
HA
5245
5246 for_each_online_pgdat(pgdat) {
5247 for (i = 0; i < MAX_NR_ZONES; i++) {
5248 struct zone *zone = pgdat->node_zones + i;
5249 unsigned long max = 0;
5250
5251 /* Find valid and maximum lowmem_reserve in the zone */
5252 for (j = i; j < MAX_NR_ZONES; j++) {
5253 if (zone->lowmem_reserve[j] > max)
5254 max = zone->lowmem_reserve[j];
5255 }
5256
41858966
MG
5257 /* we treat the high watermark as reserved pages. */
5258 max += high_wmark_pages(zone);
cb45b0e9 5259
b40da049
JL
5260 if (max > zone->managed_pages)
5261 max = zone->managed_pages;
cb45b0e9 5262 reserve_pages += max;
ab8fabd4
JW
5263 /*
5264 * Lowmem reserves are not available to
5265 * GFP_HIGHUSER page cache allocations and
5266 * kswapd tries to balance zones to their high
5267 * watermark. As a result, neither should be
5268 * regarded as dirtyable memory, to prevent a
5269 * situation where reclaim has to clean pages
5270 * in order to balance the zones.
5271 */
5272 zone->dirty_balance_reserve = max;
cb45b0e9
HA
5273 }
5274 }
ab8fabd4 5275 dirty_balance_reserve = reserve_pages;
cb45b0e9
HA
5276 totalreserve_pages = reserve_pages;
5277}
5278
1da177e4
LT
5279/*
5280 * setup_per_zone_lowmem_reserve - called whenever
5281 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
5282 * has a correct pages reserved value, so an adequate number of
5283 * pages are left in the zone after a successful __alloc_pages().
5284 */
5285static void setup_per_zone_lowmem_reserve(void)
5286{
5287 struct pglist_data *pgdat;
2f6726e5 5288 enum zone_type j, idx;
1da177e4 5289
ec936fc5 5290 for_each_online_pgdat(pgdat) {
1da177e4
LT
5291 for (j = 0; j < MAX_NR_ZONES; j++) {
5292 struct zone *zone = pgdat->node_zones + j;
b40da049 5293 unsigned long managed_pages = zone->managed_pages;
1da177e4
LT
5294
5295 zone->lowmem_reserve[j] = 0;
5296
2f6726e5
CL
5297 idx = j;
5298 while (idx) {
1da177e4
LT
5299 struct zone *lower_zone;
5300
2f6726e5
CL
5301 idx--;
5302
1da177e4
LT
5303 if (sysctl_lowmem_reserve_ratio[idx] < 1)
5304 sysctl_lowmem_reserve_ratio[idx] = 1;
5305
5306 lower_zone = pgdat->node_zones + idx;
b40da049 5307 lower_zone->lowmem_reserve[j] = managed_pages /
1da177e4 5308 sysctl_lowmem_reserve_ratio[idx];
b40da049 5309 managed_pages += lower_zone->managed_pages;
1da177e4
LT
5310 }
5311 }
5312 }
cb45b0e9
HA
5313
5314 /* update totalreserve_pages */
5315 calculate_totalreserve_pages();
1da177e4
LT
5316}
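A worked example of the reserve computed above, using the default ratios; the zone sizes are invented.

/*
 * Example with the default sysctl_lowmem_reserve_ratio of 256 for DMA and
 * 32 for Normal: if ZONE_NORMAL manages ~4GiB above a small ZONE_DMA,
 * then ZONE_DMA ends up with a lowmem_reserve[] entry against ZONE_NORMAL
 * of roughly 4GiB / 256 = 16MiB worth of pages, i.e. allocations that
 * could have used ZONE_NORMAL must leave that many DMA pages untouched.
 */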
5317
cfd3da1e 5318static void __setup_per_zone_wmarks(void)
1da177e4
LT
5319{
5320 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5321 unsigned long lowmem_pages = 0;
5322 struct zone *zone;
5323 unsigned long flags;
5324
5325 /* Calculate total number of !ZONE_HIGHMEM pages */
5326 for_each_zone(zone) {
5327 if (!is_highmem(zone))
b40da049 5328 lowmem_pages += zone->managed_pages;
1da177e4
LT
5329 }
5330
5331 for_each_zone(zone) {
ac924c60
AM
5332 u64 tmp;
5333
1125b4e3 5334 spin_lock_irqsave(&zone->lock, flags);
b40da049 5335 tmp = (u64)pages_min * zone->managed_pages;
ac924c60 5336 do_div(tmp, lowmem_pages);
1da177e4
LT
5337 if (is_highmem(zone)) {
5338 /*
669ed175
NP
5339 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5340 * need highmem pages, so cap pages_min to a small
5341 * value here.
5342 *
41858966 5343 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
669ed175
NP
5344 * deltas control async page reclaim, and so should
5345 * not be capped for highmem.
1da177e4 5346 */
90ae8d67 5347 unsigned long min_pages;
1da177e4 5348
b40da049 5349 min_pages = zone->managed_pages / 1024;
90ae8d67 5350 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
41858966 5351 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 5352 } else {
669ed175
NP
5353 /*
5354 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
5355 * proportionate to the zone's size.
5356 */
41858966 5357 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
5358 }
5359
41858966
MG
5360 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
5361 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
49f223a9 5362
56fd56b8 5363 setup_zone_migrate_reserve(zone);
1125b4e3 5364 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 5365 }
cb45b0e9
HA
5366
5367 /* update totalreserve_pages */
5368 calculate_totalreserve_pages();
1da177e4
LT
5369}
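To make the per-zone arithmetic above concrete, a worked example with invented numbers:

/*
 * Illustration: min_free_kbytes = 65536 with 4KiB pages gives
 * pages_min = 16384. For a lowmem zone holding 1/16 of all lowmem,
 * tmp = 16384 / 16 = 1024 pages, hence
 *	WMARK_MIN  = 1024
 *	WMARK_LOW  = 1024 + 1024/4 = 1280
 *	WMARK_HIGH = 1024 + 1024/2 = 1536
 * A highmem zone instead gets WMARK_MIN clamped into
 * [SWAP_CLUSTER_MAX, 128] pages, independent of tmp.
 */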
5370
cfd3da1e
MG
5371/**
5372 * setup_per_zone_wmarks - called when min_free_kbytes changes
5373 * or when memory is hot-{added|removed}
5374 *
5375 * Ensures that the watermark[min,low,high] values for each zone are set
5376 * correctly with respect to min_free_kbytes.
5377 */
5378void setup_per_zone_wmarks(void)
5379{
5380 mutex_lock(&zonelists_mutex);
5381 __setup_per_zone_wmarks();
5382 mutex_unlock(&zonelists_mutex);
5383}
5384
55a4462a 5385/*
556adecb
RR
5386 * The inactive anon list should be small enough that the VM never has to
5387 * do too much work, but large enough that each inactive page has a chance
5388 * to be referenced again before it is swapped out.
5389 *
5390 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5391 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5392 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5393 * the anonymous pages are kept on the inactive list.
5394 *
5395 * total target max
5396 * memory ratio inactive anon
5397 * -------------------------------------
5398 * 10MB 1 5MB
5399 * 100MB 1 50MB
5400 * 1GB 3 250MB
5401 * 10GB 10 0.9GB
5402 * 100GB 31 3GB
5403 * 1TB 101 10GB
5404 * 10TB 320 32GB
5405 */
1b79acc9 5406static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
556adecb 5407{
96cb4df5 5408 unsigned int gb, ratio;
556adecb 5409
96cb4df5 5410 /* Zone size in gigabytes */
b40da049 5411 gb = zone->managed_pages >> (30 - PAGE_SHIFT);
96cb4df5 5412 if (gb)
556adecb 5413 ratio = int_sqrt(10 * gb);
96cb4df5
MK
5414 else
5415 ratio = 1;
556adecb 5416
96cb4df5
MK
5417 zone->inactive_ratio = ratio;
5418}
556adecb 5419
839a4fcc 5420static void __meminit setup_per_zone_inactive_ratio(void)
96cb4df5
MK
5421{
5422 struct zone *zone;
5423
5424 for_each_zone(zone)
5425 calculate_zone_inactive_ratio(zone);
556adecb
RR
5426}
5427
1da177e4
LT
5428/*
5429 * Initialise min_free_kbytes.
5430 *
5431 * For small machines we want it small (128k min). For large machines
5432 * we want it large (64MB max). But it is not linear, because network
5433 * bandwidth does not increase linearly with machine size. We use
5434 *
5435 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5436 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
5437 *
5438 * which yields
5439 *
5440 * 16MB: 512k
5441 * 32MB: 724k
5442 * 64MB: 1024k
5443 * 128MB: 1448k
5444 * 256MB: 2048k
5445 * 512MB: 2896k
5446 * 1024MB: 4096k
5447 * 2048MB: 5792k
5448 * 4096MB: 8192k
5449 * 8192MB: 11584k
5450 * 16384MB: 16384k
5451 */
1b79acc9 5452int __meminit init_per_zone_wmark_min(void)
1da177e4
LT
5453{
5454 unsigned long lowmem_kbytes;
5455
5456 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5457
5458 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5459 if (min_free_kbytes < 128)
5460 min_free_kbytes = 128;
5461 if (min_free_kbytes > 65536)
5462 min_free_kbytes = 65536;
bc75d33f 5463 setup_per_zone_wmarks();
a6cccdc3 5464 refresh_zone_stat_thresholds();
1da177e4 5465 setup_per_zone_lowmem_reserve();
556adecb 5466 setup_per_zone_inactive_ratio();
1da177e4
LT
5467 return 0;
5468}
bc75d33f 5469module_init(init_per_zone_wmark_min)
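As a quick sanity check of the formula against the table above (arithmetic only, no new behaviour):

/*
 * With ~4096MB of lowmem, lowmem_kbytes ~= 4194304 and
 * int_sqrt(4194304 * 16) = int_sqrt(67108864) = 8192, so min_free_kbytes
 * becomes 8192k, matching the 4096MB row of the table.
 */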
1da177e4
LT
5470
5471/*
5472 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5473 * that we can call two helper functions whenever min_free_kbytes
5474 * changes.
5475 */
5476int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
8d65af78 5477 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 5478{
8d65af78 5479 proc_dointvec(table, write, buffer, length, ppos);
3b1d92c5 5480 if (write)
bc75d33f 5481 setup_per_zone_wmarks();
1da177e4
LT
5482 return 0;
5483}
5484
9614634f
CL
5485#ifdef CONFIG_NUMA
5486int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5487 void __user *buffer, size_t *length, loff_t *ppos)
9614634f
CL
5488{
5489 struct zone *zone;
5490 int rc;
5491
8d65af78 5492 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
9614634f
CL
5493 if (rc)
5494 return rc;
5495
5496 for_each_zone(zone)
b40da049 5497 zone->min_unmapped_pages = (zone->managed_pages *
9614634f
CL
5498 sysctl_min_unmapped_ratio) / 100;
5499 return 0;
5500}
0ff38490
CL
5501
5502int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5503 void __user *buffer, size_t *length, loff_t *ppos)
0ff38490
CL
5504{
5505 struct zone *zone;
5506 int rc;
5507
8d65af78 5508 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
5509 if (rc)
5510 return rc;
5511
5512 for_each_zone(zone)
b40da049 5513 zone->min_slab_pages = (zone->managed_pages *
0ff38490
CL
5514 sysctl_min_slab_ratio) / 100;
5515 return 0;
5516}
9614634f
CL
5517#endif
5518
1da177e4
LT
5519/*
5520 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5521 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5522 * whenever sysctl_lowmem_reserve_ratio changes.
5523 *
5524 * The reserve ratio obviously has absolutely no relation with the
41858966 5525 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
5526 * as a function of the boot-time zone sizes.
5527 */
5528int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5529 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 5530{
8d65af78 5531 proc_dointvec_minmax(table, write, buffer, length, ppos);
1da177e4
LT
5532 setup_per_zone_lowmem_reserve();
5533 return 0;
5534}
5535
8ad4b1fb
RS
5536/*
5537 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5538 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
5539 * can have before it gets flushed back to the buddy allocator.
5540 */
5541
5542int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
8d65af78 5543 void __user *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
5544{
5545 struct zone *zone;
5546 unsigned int cpu;
5547 int ret;
5548
8d65af78 5549 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
93278814 5550 if (!write || (ret < 0))
8ad4b1fb 5551 return ret;
364df0eb 5552 for_each_populated_zone(zone) {
99dcc3e5 5553 for_each_possible_cpu(cpu) {
8ad4b1fb 5554 unsigned long high;
b40da049 5555 high = zone->managed_pages / percpu_pagelist_fraction;
99dcc3e5
CL
5556 setup_pagelist_highmark(
5557 per_cpu_ptr(zone->pageset, cpu), high);
8ad4b1fb
RS
5558 }
5559 }
5560 return 0;
5561}
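A usage note for the handler above; the fraction value and zone size are examples only, the procfs path is the standard location for this sysctl.

/*
 * Example: "echo 8 > /proc/sys/vm/percpu_pagelist_fraction" makes each
 * per-cpu hot list of a zone with 1048576 managed pages flush back to the
 * buddy allocator once it holds 1048576 / 8 = 131072 pages;
 * setup_pagelist_highmark() then derives the pcp batch size from that
 * high mark.
 */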
5562
f034b5d4 5563int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
5564
5565#ifdef CONFIG_NUMA
5566static int __init set_hashdist(char *str)
5567{
5568 if (!str)
5569 return 0;
5570 hashdist = simple_strtoul(str, &str, 0);
5571 return 1;
5572}
5573__setup("hashdist=", set_hashdist);
5574#endif
5575
5576/*
5577 * allocate a large system hash table from bootmem
5578 * - it is assumed that the hash table must contain an exact power-of-2
5579 * quantity of entries
5580 * - limit is the number of hash buckets, not the total allocation size
5581 */
5582void *__init alloc_large_system_hash(const char *tablename,
5583 unsigned long bucketsize,
5584 unsigned long numentries,
5585 int scale,
5586 int flags,
5587 unsigned int *_hash_shift,
5588 unsigned int *_hash_mask,
31fe62b9
TB
5589 unsigned long low_limit,
5590 unsigned long high_limit)
1da177e4 5591{
31fe62b9 5592 unsigned long long max = high_limit;
1da177e4
LT
5593 unsigned long log2qty, size;
5594 void *table = NULL;
5595
5596 /* allow the kernel cmdline to have a say */
5597 if (!numentries) {
5598 /* round applicable memory size up to nearest megabyte */
04903664 5599 numentries = nr_kernel_pages;
1da177e4
LT
5600 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5601 numentries >>= 20 - PAGE_SHIFT;
5602 numentries <<= 20 - PAGE_SHIFT;
5603
5604 /* limit to 1 bucket per 2^scale bytes of low memory */
5605 if (scale > PAGE_SHIFT)
5606 numentries >>= (scale - PAGE_SHIFT);
5607 else
5608 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
5609
5610 /* Make sure we've got at least a 0-order allocation.. */
2c85f51d
JB
5611 if (unlikely(flags & HASH_SMALL)) {
5612 /* Makes no sense without HASH_EARLY */
5613 WARN_ON(!(flags & HASH_EARLY));
5614 if (!(numentries >> *_hash_shift)) {
5615 numentries = 1UL << *_hash_shift;
5616 BUG_ON(!numentries);
5617 }
5618 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 5619 numentries = PAGE_SIZE / bucketsize;
1da177e4 5620 }
6e692ed3 5621 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
5622
5623 /* limit allocation size to 1/16 total memory by default */
5624 if (max == 0) {
5625 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5626 do_div(max, bucketsize);
5627 }
074b8517 5628 max = min(max, 0x80000000ULL);
1da177e4 5629
31fe62b9
TB
5630 if (numentries < low_limit)
5631 numentries = low_limit;
1da177e4
LT
5632 if (numentries > max)
5633 numentries = max;
5634
f0d1b0b3 5635 log2qty = ilog2(numentries);
1da177e4
LT
5636
5637 do {
5638 size = bucketsize << log2qty;
5639 if (flags & HASH_EARLY)
74768ed8 5640 table = alloc_bootmem_nopanic(size);
1da177e4
LT
5641 else if (hashdist)
5642 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5643 else {
1037b83b
ED
5644 /*
5645 * If bucketsize is not a power-of-two, we may free
a1dd268c
MG
5646 * some pages at the end of hash table which
5647 * alloc_pages_exact() automatically does
1037b83b 5648 */
264ef8a9 5649 if (get_order(size) < MAX_ORDER) {
a1dd268c 5650 table = alloc_pages_exact(size, GFP_ATOMIC);
264ef8a9
CM
5651 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5652 }
1da177e4
LT
5653 }
5654 } while (!table && size > PAGE_SIZE && --log2qty);
5655
5656 if (!table)
5657 panic("Failed to allocate %s hash table\n", tablename);
5658
f241e660 5659 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
1da177e4 5660 tablename,
f241e660 5661 (1UL << log2qty),
f0d1b0b3 5662 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
5663 size);
5664
5665 if (_hash_shift)
5666 *_hash_shift = log2qty;
5667 if (_hash_mask)
5668 *_hash_mask = (1 << log2qty) - 1;
5669
5670 return table;
5671}
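A hedged example of how a subsystem typically sizes its table with this helper; the call below is paraphrased from the dentry-cache setup and is not quoted verbatim.

/*
 * Roughly how fs/dcache.c sizes the dentry hash table (paraphrased):
 *
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_bl_head),
 *					dhash_entries, 13, HASH_EARLY,
 *					&d_hash_shift, &d_hash_mask, 0, 0);
 *
 * Passing numentries == 0 lets the helper scale the table from
 * nr_kernel_pages, and HASH_EARLY makes it come from bootmem instead of
 * vmalloc or the buddy allocator.
 */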
a117e66e 5672
835c134e
MG
5673/* Return a pointer to the bitmap storing bits affecting a block of pages */
5674static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5675 unsigned long pfn)
5676{
5677#ifdef CONFIG_SPARSEMEM
5678 return __pfn_to_section(pfn)->pageblock_flags;
5679#else
5680 return zone->pageblock_flags;
5681#endif /* CONFIG_SPARSEMEM */
5682}
5683
5684static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5685{
5686#ifdef CONFIG_SPARSEMEM
5687 pfn &= (PAGES_PER_SECTION-1);
d9c23400 5688 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e 5689#else
c060f943 5690 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
d9c23400 5691 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
5692#endif /* CONFIG_SPARSEMEM */
5693}
5694
5695/**
d9c23400 5696 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e
MG
5697 * @page: The page within the block of interest
5698 * @start_bitidx: The first bit of interest to retrieve
5699 * @end_bitidx: The last bit of interest
5700 * returns pageblock_bits flags
5701 */
5702unsigned long get_pageblock_flags_group(struct page *page,
5703 int start_bitidx, int end_bitidx)
5704{
5705 struct zone *zone;
5706 unsigned long *bitmap;
5707 unsigned long pfn, bitidx;
5708 unsigned long flags = 0;
5709 unsigned long value = 1;
5710
5711 zone = page_zone(page);
5712 pfn = page_to_pfn(page);
5713 bitmap = get_pageblock_bitmap(zone, pfn);
5714 bitidx = pfn_to_bitidx(zone, pfn);
5715
5716 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5717 if (test_bit(bitidx + start_bitidx, bitmap))
5718 flags |= value;
6220ec78 5719
835c134e
MG
5720 return flags;
5721}
5722
5723/**
d9c23400 5724 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e
MG
5725 * @page: The page within the block of interest
5726 * @start_bitidx: The first bit of interest
5727 * @end_bitidx: The last bit of interest
5728 * @flags: The flags to set
5729 */
5730void set_pageblock_flags_group(struct page *page, unsigned long flags,
5731 int start_bitidx, int end_bitidx)
5732{
5733 struct zone *zone;
5734 unsigned long *bitmap;
5735 unsigned long pfn, bitidx;
5736 unsigned long value = 1;
5737
5738 zone = page_zone(page);
5739 pfn = page_to_pfn(page);
5740 bitmap = get_pageblock_bitmap(zone, pfn);
5741 bitidx = pfn_to_bitidx(zone, pfn);
108bcc96 5742 VM_BUG_ON(!zone_spans_pfn(zone, pfn));
835c134e
MG
5743
5744 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5745 if (flags & value)
5746 __set_bit(bitidx + start_bitidx, bitmap);
5747 else
5748 __clear_bit(bitidx + start_bitidx, bitmap);
5749}
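For context, a hedged sketch of how the migratetype accessors are built on the two helpers above; the PB_migrate bit names follow include/linux/pageblock-flags.h conventions and are shown here as an assumption.

/*
 * Sketch (assumed, matching pageblock-flags.h conventions):
 *
 *	#define get_pageblock_migratetype(page) \
 *		get_pageblock_flags_group(page, PB_migrate, PB_migrate_end)
 *
 *	set_pageblock_flags_group(page, MIGRATE_MOVABLE,
 *				  PB_migrate, PB_migrate_end);
 *
 * i.e. the migratetype is just a small group of pageblock bits read and
 * written through the bitmap returned by get_pageblock_bitmap().
 */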
a5d76b54
KH
5750
5751/*
80934513
MK
5752 * This function checks whether the pageblock includes unmovable pages or not.
5753 * If @count is not zero, it is okay to include up to @count unmovable pages
5754 *
5755 * A PageLRU check without isolation or lru_lock could race, so a
5756 * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
5757 * expect this function to be exact.
a5d76b54 5758 */
b023f468
WC
5759bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
5760 bool skip_hwpoisoned_pages)
49ac8255
KH
5761{
5762 unsigned long pfn, iter, found;
47118af0
MN
5763 int mt;
5764
49ac8255
KH
5765 /*
5766 * To avoid noisy data, lru_add_drain_all() should be called first.
80934513 5767 * If the zone is ZONE_MOVABLE, it never contains unmovable pages
49ac8255
KH
5768 */
5769 if (zone_idx(zone) == ZONE_MOVABLE)
80934513 5770 return false;
47118af0
MN
5771 mt = get_pageblock_migratetype(page);
5772 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
80934513 5773 return false;
49ac8255
KH
5774
5775 pfn = page_to_pfn(page);
5776 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5777 unsigned long check = pfn + iter;
5778
29723fcc 5779 if (!pfn_valid_within(check))
49ac8255 5780 continue;
29723fcc 5781
49ac8255 5782 page = pfn_to_page(check);
97d255c8
MK
5783 /*
5784 * We can't use page_count without pinning the page,
5785 * because another CPU can free the compound page.
5786 * This check already skips compound tails of THP
5787 * because their page->_count is zero at all times.
5788 */
5789 if (!atomic_read(&page->_count)) {
49ac8255
KH
5790 if (PageBuddy(page))
5791 iter += (1 << page_order(page)) - 1;
5792 continue;
5793 }
97d255c8 5794
b023f468
WC
5795 /*
5796 * The HWPoisoned page may not be in the buddy system, and
5797 * page_count() is not 0.
5798 */
5799 if (skip_hwpoisoned_pages && PageHWPoison(page))
5800 continue;
5801
49ac8255
KH
5802 if (!PageLRU(page))
5803 found++;
5804 /*
5805 * If there are RECLAIMABLE pages, we need to check them.
5806 * But for now, memory offline itself doesn't call shrink_slab()
5807 * and this still needs to be fixed.
5808 */
5809 /*
5810 * If the page is not RAM, page_count() should be 0.
5811 * We don't need any further check. This is a _used_ non-movable page.
5812 *
5813 * The problematic thing here is PG_reserved pages. PG_reserved
5814 * is set on both memory hole pages and _used_ kernel
5815 * pages at boot.
5816 */
5817 if (found > count)
80934513 5818 return true;
49ac8255 5819 }
80934513 5820 return false;
49ac8255
KH
5821}
5822
5823bool is_pageblock_removable_nolock(struct page *page)
5824{
656a0706
MH
5825 struct zone *zone;
5826 unsigned long pfn;
687875fb
MH
5827
5828 /*
5829 * We have to be careful here because we are iterating over memory
5830 * sections which are not zone aware so we might end up outside of
5831 * the zone but still within the section.
656a0706
MH
5832 * We have to take care about the node as well. If the node is offline
5833 * its NODE_DATA will be NULL - see page_zone.
687875fb 5834 */
656a0706
MH
5835 if (!node_online(page_to_nid(page)))
5836 return false;
5837
5838 zone = page_zone(page);
5839 pfn = page_to_pfn(page);
108bcc96 5840 if (!zone_spans_pfn(zone, pfn))
687875fb
MH
5841 return false;
5842
b023f468 5843 return !has_unmovable_pages(zone, page, 0, true);
a5d76b54 5844}
0c0e6195 5845
041d3a8c
MN
5846#ifdef CONFIG_CMA
5847
5848static unsigned long pfn_max_align_down(unsigned long pfn)
5849{
5850 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
5851 pageblock_nr_pages) - 1);
5852}
5853
5854static unsigned long pfn_max_align_up(unsigned long pfn)
5855{
5856 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
5857 pageblock_nr_pages));
5858}
5859
041d3a8c 5860/* [start, end) must belong to a single zone. */
bb13ffeb
MG
5861static int __alloc_contig_migrate_range(struct compact_control *cc,
5862 unsigned long start, unsigned long end)
041d3a8c
MN
5863{
5864 /* This function is based on compact_zone() from compaction.c. */
beb51eaa 5865 unsigned long nr_reclaimed;
041d3a8c
MN
5866 unsigned long pfn = start;
5867 unsigned int tries = 0;
5868 int ret = 0;
5869
be49a6e1 5870 migrate_prep();
041d3a8c 5871
bb13ffeb 5872 while (pfn < end || !list_empty(&cc->migratepages)) {
041d3a8c
MN
5873 if (fatal_signal_pending(current)) {
5874 ret = -EINTR;
5875 break;
5876 }
5877
bb13ffeb
MG
5878 if (list_empty(&cc->migratepages)) {
5879 cc->nr_migratepages = 0;
5880 pfn = isolate_migratepages_range(cc->zone, cc,
e46a2879 5881 pfn, end, true);
041d3a8c
MN
5882 if (!pfn) {
5883 ret = -EINTR;
5884 break;
5885 }
5886 tries = 0;
5887 } else if (++tries == 5) {
5888 ret = ret < 0 ? ret : -EBUSY;
5889 break;
5890 }
5891
beb51eaa
MK
5892 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
5893 &cc->migratepages);
5894 cc->nr_migratepages -= nr_reclaimed;
02c6de8d 5895
9c620e2b
HD
5896 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
5897 0, MIGRATE_SYNC, MR_CMA);
041d3a8c 5898 }
2a6f5124
SP
5899 if (ret < 0) {
5900 putback_movable_pages(&cc->migratepages);
5901 return ret;
5902 }
5903 return 0;
041d3a8c
MN
5904}
5905
5906/**
5907 * alloc_contig_range() -- tries to allocate given range of pages
5908 * @start: start PFN to allocate
5909 * @end: one-past-the-last PFN to allocate
0815f3d8
MN
5910 * @migratetype: migratetype of the underlying pageblocks (either
5911 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
5912 * in range must have the same migratetype and it must
5913 * be either of the two.
041d3a8c
MN
5914 *
5915 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
5916 * aligned, however it's the caller's responsibility to guarantee that
5917 * we are the only thread that changes migrate type of pageblocks the
5918 * pages fall in.
5919 *
5920 * The PFN range must belong to a single zone.
5921 *
5922 * Returns zero on success or negative error code. On success all
5923 * pages whose PFN is in [start, end) are allocated for the caller and
5924 * need to be freed with free_contig_range().
5925 */
0815f3d8
MN
5926int alloc_contig_range(unsigned long start, unsigned long end,
5927 unsigned migratetype)
041d3a8c 5928{
041d3a8c
MN
5929 unsigned long outer_start, outer_end;
5930 int ret = 0, order;
5931
bb13ffeb
MG
5932 struct compact_control cc = {
5933 .nr_migratepages = 0,
5934 .order = -1,
5935 .zone = page_zone(pfn_to_page(start)),
5936 .sync = true,
5937 .ignore_skip_hint = true,
5938 };
5939 INIT_LIST_HEAD(&cc.migratepages);
5940
041d3a8c
MN
5941 /*
5942 * What we do here is we mark all pageblocks in range as
5943 * MIGRATE_ISOLATE. Because pageblock and max order pages may
5944 * have different sizes, and due to the way the page allocator
5945 * works, we align the range to the biggest of the two sizes so
5946 * that page allocator won't try to merge buddies from
5947 * different pageblocks and change MIGRATE_ISOLATE to some
5948 * other migration type.
5949 *
5950 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
5951 * migrate the pages from an unaligned range (ie. pages that
5952 * we are interested in). This will put all the pages in
5953 * range back to page allocator as MIGRATE_ISOLATE.
5954 *
5955 * When this is done, we take the pages in range from page
5956 * allocator removing them from the buddy system. This way
5957 * page allocator will never consider using them.
5958 *
5959 * This lets us mark the pageblocks back as
5960 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
5961 * aligned range but not in the unaligned, original range are
5962 * put back to page allocator so that buddy can use them.
5963 */
5964
5965 ret = start_isolate_page_range(pfn_max_align_down(start),
b023f468
WC
5966 pfn_max_align_up(end), migratetype,
5967 false);
041d3a8c 5968 if (ret)
86a595f9 5969 return ret;
041d3a8c 5970
bb13ffeb 5971 ret = __alloc_contig_migrate_range(&cc, start, end);
041d3a8c
MN
5972 if (ret)
5973 goto done;
5974
5975 /*
5976 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
5977 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
5978 * more, all pages in [start, end) are free in page allocator.
5979 * What we are going to do is to allocate all pages from
5980 * [start, end) (that is remove them from page allocator).
5981 *
5982 * The only problem is that pages at the beginning and at the
5983 * end of the interesting range may not be aligned with pages that
5984 * the page allocator holds, ie. they can be part of higher order
5985 * pages. Because of this, we reserve the bigger range and
5986 * once this is done free the pages we are not interested in.
5987 *
5988 * We don't have to hold zone->lock here because the pages are
5989 * isolated thus they won't get removed from buddy.
5990 */
5991
5992 lru_add_drain_all();
5993 drain_all_pages();
5994
5995 order = 0;
5996 outer_start = start;
5997 while (!PageBuddy(pfn_to_page(outer_start))) {
5998 if (++order >= MAX_ORDER) {
5999 ret = -EBUSY;
6000 goto done;
6001 }
6002 outer_start &= ~0UL << order;
6003 }
6004
6005 /* Make sure the range is really isolated. */
b023f468 6006 if (test_pages_isolated(outer_start, end, false)) {
041d3a8c
MN
6007 pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
6008 outer_start, end);
6009 ret = -EBUSY;
6010 goto done;
6011 }
6012
49f223a9
MS
6013
6014 /* Grab isolated pages from freelists. */
bb13ffeb 6015 outer_end = isolate_freepages_range(&cc, outer_start, end);
041d3a8c
MN
6016 if (!outer_end) {
6017 ret = -EBUSY;
6018 goto done;
6019 }
6020
6021 /* Free head and tail (if any) */
6022 if (start != outer_start)
6023 free_contig_range(outer_start, start - outer_start);
6024 if (end != outer_end)
6025 free_contig_range(end, outer_end - end);
6026
6027done:
6028 undo_isolate_page_range(pfn_max_align_down(start),
0815f3d8 6029 pfn_max_align_up(end), migratetype);
041d3a8c
MN
6030 return ret;
6031}
6032
6033void free_contig_range(unsigned long pfn, unsigned nr_pages)
6034{
bcc2b02f
MS
6035 unsigned int count = 0;
6036
6037 for (; nr_pages--; pfn++) {
6038 struct page *page = pfn_to_page(pfn);
6039
6040 count += page_count(page) != 1;
6041 __free_page(page);
6042 }
6043 WARN(count != 0, "%d pages are still in use!\n", count);
041d3a8c
MN
6044}
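A hedged usage sketch tying the two functions above together, in the style of a CMA-like caller; migratetype ownership and error handling are simplified away.

/*
 * Illustrative caller (simplified):
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 *	if (!ret) {
 *		struct page *first = pfn_to_page(pfn);
 *
 *		... use the nr_pages contiguous pages starting at first ...
 *		free_contig_range(pfn, nr_pages);
 *	}
 *
 * The caller must be the only one changing the migratetype of the
 * affected pageblocks, as the alloc_contig_range() comment requires.
 */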
6045#endif
6046
4ed7e022
JL
6047#ifdef CONFIG_MEMORY_HOTPLUG
6048static int __meminit __zone_pcp_update(void *data)
6049{
6050 struct zone *zone = data;
6051 int cpu;
6052 unsigned long batch = zone_batchsize(zone), flags;
6053
6054 for_each_possible_cpu(cpu) {
6055 struct per_cpu_pageset *pset;
6056 struct per_cpu_pages *pcp;
6057
6058 pset = per_cpu_ptr(zone->pageset, cpu);
6059 pcp = &pset->pcp;
6060
6061 local_irq_save(flags);
6062 if (pcp->count > 0)
6063 free_pcppages_bulk(zone, pcp->count, pcp);
5a883813 6064 drain_zonestat(zone, pset);
4ed7e022
JL
6065 setup_pageset(pset, batch);
6066 local_irq_restore(flags);
6067 }
6068 return 0;
6069}
6070
6071void __meminit zone_pcp_update(struct zone *zone)
6072{
6073 stop_machine(__zone_pcp_update, zone, NULL);
6074}
6075#endif
6076
340175b7
JL
6077void zone_pcp_reset(struct zone *zone)
6078{
6079 unsigned long flags;
5a883813
MK
6080 int cpu;
6081 struct per_cpu_pageset *pset;
340175b7
JL
6082
6083 /* avoid races with drain_pages() */
6084 local_irq_save(flags);
6085 if (zone->pageset != &boot_pageset) {
5a883813
MK
6086 for_each_online_cpu(cpu) {
6087 pset = per_cpu_ptr(zone->pageset, cpu);
6088 drain_zonestat(zone, pset);
6089 }
340175b7
JL
6090 free_percpu(zone->pageset);
6091 zone->pageset = &boot_pageset;
6092 }
6093 local_irq_restore(flags);
6094}
6095
6dcd73d7 6096#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195
KH
6097/*
6098 * All pages in the range must be isolated before calling this.
6099 */
6100void
6101__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6102{
6103 struct page *page;
6104 struct zone *zone;
6105 int order, i;
6106 unsigned long pfn;
6107 unsigned long flags;
6108 /* find the first valid pfn */
6109 for (pfn = start_pfn; pfn < end_pfn; pfn++)
6110 if (pfn_valid(pfn))
6111 break;
6112 if (pfn == end_pfn)
6113 return;
6114 zone = page_zone(pfn_to_page(pfn));
6115 spin_lock_irqsave(&zone->lock, flags);
6116 pfn = start_pfn;
6117 while (pfn < end_pfn) {
6118 if (!pfn_valid(pfn)) {
6119 pfn++;
6120 continue;
6121 }
6122 page = pfn_to_page(pfn);
b023f468
WC
6123 /*
6124 * The HWPoisoned page may not be in the buddy system, and
6125 * page_count() is not 0.
6126 */
6127 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6128 pfn++;
6129 SetPageReserved(page);
6130 continue;
6131 }
6132
0c0e6195
KH
6133 BUG_ON(page_count(page));
6134 BUG_ON(!PageBuddy(page));
6135 order = page_order(page);
6136#ifdef CONFIG_DEBUG_VM
6137 printk(KERN_INFO "remove from free list %lx %d %lx\n",
6138 pfn, 1 << order, end_pfn);
6139#endif
6140 list_del(&page->lru);
6141 rmv_page_order(page);
6142 zone->free_area[order].nr_free--;
0c0e6195
KH
6143 for (i = 0; i < (1 << order); i++)
6144 SetPageReserved((page+i));
6145 pfn += (1 << order);
6146 }
6147 spin_unlock_irqrestore(&zone->lock, flags);
6148}
6149#endif
8d22ba1b
WF
6150
6151#ifdef CONFIG_MEMORY_FAILURE
6152bool is_free_buddy_page(struct page *page)
6153{
6154 struct zone *zone = page_zone(page);
6155 unsigned long pfn = page_to_pfn(page);
6156 unsigned long flags;
6157 int order;
6158
6159 spin_lock_irqsave(&zone->lock, flags);
6160 for (order = 0; order < MAX_ORDER; order++) {
6161 struct page *page_head = page - (pfn & ((1 << order) - 1));
6162
6163 if (PageBuddy(page_head) && page_order(page_head) >= order)
6164 break;
6165 }
6166 spin_unlock_irqrestore(&zone->lock, flags);
6167
6168 return order < MAX_ORDER;
6169}
6170#endif
718a3821 6171
51300cef 6172static const struct trace_print_flags pageflag_names[] = {
718a3821
WF
6173 {1UL << PG_locked, "locked" },
6174 {1UL << PG_error, "error" },
6175 {1UL << PG_referenced, "referenced" },
6176 {1UL << PG_uptodate, "uptodate" },
6177 {1UL << PG_dirty, "dirty" },
6178 {1UL << PG_lru, "lru" },
6179 {1UL << PG_active, "active" },
6180 {1UL << PG_slab, "slab" },
6181 {1UL << PG_owner_priv_1, "owner_priv_1" },
6182 {1UL << PG_arch_1, "arch_1" },
6183 {1UL << PG_reserved, "reserved" },
6184 {1UL << PG_private, "private" },
6185 {1UL << PG_private_2, "private_2" },
6186 {1UL << PG_writeback, "writeback" },
6187#ifdef CONFIG_PAGEFLAGS_EXTENDED
6188 {1UL << PG_head, "head" },
6189 {1UL << PG_tail, "tail" },
6190#else
6191 {1UL << PG_compound, "compound" },
6192#endif
6193 {1UL << PG_swapcache, "swapcache" },
6194 {1UL << PG_mappedtodisk, "mappedtodisk" },
6195 {1UL << PG_reclaim, "reclaim" },
718a3821
WF
6196 {1UL << PG_swapbacked, "swapbacked" },
6197 {1UL << PG_unevictable, "unevictable" },
6198#ifdef CONFIG_MMU
6199 {1UL << PG_mlocked, "mlocked" },
6200#endif
6201#ifdef CONFIG_ARCH_USES_PG_UNCACHED
6202 {1UL << PG_uncached, "uncached" },
6203#endif
6204#ifdef CONFIG_MEMORY_FAILURE
6205 {1UL << PG_hwpoison, "hwpoison" },
be9cd873
GS
6206#endif
6207#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6208 {1UL << PG_compound_lock, "compound_lock" },
718a3821 6209#endif
718a3821
WF
6210};
6211
6212static void dump_page_flags(unsigned long flags)
6213{
6214 const char *delim = "";
6215 unsigned long mask;
6216 int i;
6217
51300cef 6218 BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
acc50c11 6219
718a3821
WF
6220 printk(KERN_ALERT "page flags: %#lx(", flags);
6221
6222 /* remove zone id */
6223 flags &= (1UL << NR_PAGEFLAGS) - 1;
6224
51300cef 6225 for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
718a3821
WF
6226
6227 mask = pageflag_names[i].mask;
6228 if ((flags & mask) != mask)
6229 continue;
6230
6231 flags &= ~mask;
6232 printk("%s%s", delim, pageflag_names[i].name);
6233 delim = "|";
6234 }
6235
6236 /* check for left over flags */
6237 if (flags)
6238 printk("%s%#lx", delim, flags);
6239
6240 printk(")\n");
6241}
6242
6243void dump_page(struct page *page)
6244{
6245 printk(KERN_ALERT
6246 "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
4e9f64c4 6247 page, atomic_read(&page->_count), page_mapcount(page),
718a3821
WF
6248 page->mapping, page->index);
6249 dump_page_flags(page->flags);
f212ad7c 6250 mem_cgroup_print_bad_page(page);
718a3821 6251}
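For reference, the kind of lines the printks above produce; the pointer and flag values are invented.

/*
 * Example output (illustrative values):
 *
 *	page:ffffea0004320000 count:1 mapcount:0 mapping:(null) index:0x0
 *	page flags: 0x400(reserved)
 */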