/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/sched/rt.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/* Movable memory ranges, will also be used by memblock subsystem. */
struct movablemem_map movablemem_map = {
	.acpi = false,
	.nr_map = 0,
};

static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static unsigned long __meminitdata zone_movable_limit[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page %lu outside zone [ %lu - %lu ]\n",
			pfn, start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		__SetPageTail(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

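/*
 * A worked example of the layout prep_compound_page() produces: after
 * prep_compound_page(page, 2), the four pages page[0]..page[3] form one
 * compound unit.  page[0] is the head with compound_order() == 2;
 * page[1..3] are tails with a page count of zero and ->first_page
 * pointing back at page[0].
 */
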
/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order)) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}

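/*
 * A worked example of rule 2 above: the order-1 block at page #10 has
 * buddy __find_buddy_index(10, 1) == 10 ^ 2 == 8, and both blocks share
 * the order-2 parent P = 10 & ~2 = 8.
 */
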
/*
 * This function checks whether a page is free && is the buddy,
 * i.e. we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount -2.
 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * runs of free pages of length (1 << order) and marked with _mapcount -2.
 * Page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;

	VM_BUG_ON(!zone_is_initialized(zone));

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	while (order < MAX_ORDER-1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard_flag(buddy);
			set_page_private(page, 0);
			__mod_zone_freepage_state(zone, 1 << order,
						  migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free.  If it is, it's possible
	 * that pages are being freed that will coalesce soon.  In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

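/*
 * A worked example of the merge loop above: freeing order-0 page #8
 * while #9 is free gives buddy_idx = 8 ^ 1 = 9 and combined_idx = 8,
 * forming an order-1 block at #8.  If the order-1 block at #10 is also
 * free (buddy_idx = 8 ^ 2 = 10), the merge repeats and an order-2 block
 * at #8 lands on free_area[2].
 */
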
static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	page_nid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around
		 * empty lists.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
			if (likely(!is_migrate_isolate_page(page))) {
				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
				if (is_migrate_cma(mt))
					__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
			}
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

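/*
 * Example of the round-robin above, assuming only the MOVABLE pcp list
 * is populated and count == 6: the first pass skips one empty list
 * (batch_free == 2) and frees two pages; the second pass skips both
 * empty lists, so batch_free reaches MIGRATE_PCPTYPES, is bumped to
 * to_free, and the remaining four pages are freed in one inner loop.
 */
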
static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	__free_one_page(page, zone, order, migratetype);
	if (unlikely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					 PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	migratetype = get_pageblock_migratetype(page);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, order, migratetype);
	local_irq_restore(flags);
}

/*
 * Read access to zone->managed_pages is safe because it's unsigned long,
 * but we still need to serialize writers.  Currently all callers of
 * __free_pages_bootmem() except put_page_bootmem() run only at boot time,
 * so for shorter boot time we shift the burden to put_page_bootmem() to
 * serialize writers.
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	unsigned int loop;

	prefetchw(page);
	for (loop = 0; loop < nr_pages; loop++) {
		struct page *p = &page[loop];

		if (loop + 1 < nr_pages)
			prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}

	page_zone(page)->managed_pages += 1 << order;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_page_refcounted(page);
	set_pageblock_migratetype(page, MIGRATE_CMA);
	__free_pages(page, pageblock_order);
	totalram_pages += pageblock_nr_pages;
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages += pageblock_nr_pages;
#endif
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing.  Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function.  This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard pages (or page), that will allow to
			 * merge back to allocator when buddy will be freed.
			 * Corresponding page table entries will not be touched,
			 * pages will stay not present in virtual address space
			 */
			INIT_LIST_HEAD(&page[size].lru);
			set_page_guard_flag(&page[size]);
			set_page_private(&page[size], high);
			/* Guard pages are not available for any usage */
			__mod_zone_freepage_state(zone, -(1 << high),
						  migratetype);
			continue;
		}
#endif
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

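/*
 * A worked example: satisfying an order-0 request from an order-3 block
 * calls expand(zone, page, 0, 3, area, migratetype).  The loop peels
 * off the upper halves as buddies of order 2, 1 and 0 (at page[4],
 * page[2] and page[1]) onto their free lists, leaving page[0] as the
 * allocation.
 */
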
/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
#endif
};

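/*
 * Reading the table above: a MIGRATE_UNMOVABLE request whose own free
 * lists are empty tries MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE, and
 * stops at MIGRATE_RESERVE, which __rmqueue_fallback() leaves for
 * __rmqueue() to handle via its retry_reserve path.
 */
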
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary.  If alignment is required, use move_freepages_block().
 */
int move_freepages(struct zone *zone,
		   struct page *start_page, struct page *end_page,
		   int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set.  This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility.
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		set_freepage_migratetype(page, migratetype);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

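/*
 * Alignment example (assuming pageblock_order == 9, i.e. 512-page
 * pageblocks): for a page at pfn 5000, start_pfn is rounded down to
 * 4608 and the whole run of pfns 4608-5119 is moved as one block.
 */
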
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area *area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0;; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				break;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list.  If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages.
			 *
			 * On the other hand, never change migration
			 * type of MIGRATE_CMA pageblocks nor move CMA
			 * pages on different free lists.  We don't
			 * want unmovable pages to be allocated from
			 * MIGRATE_CMA areas.
			 */
			if (!is_migrate_cma(migratetype) &&
			    (unlikely(current_order >= pageblock_order / 2) ||
			     start_migratetype == MIGRATE_RECLAIMABLE ||
			     page_group_by_mobility_disabled)) {
				int pages;
				pages = move_freepages_block(zone, page,
							start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)) ||
						page_group_by_mobility_disabled)
					set_pageblock_migratetype(page,
							start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			/* Take ownership for orders >= pageblock_order */
			if (current_order >= pageblock_order &&
			    !is_migrate_cma(migratetype))
				change_pageblock_range(page, current_order,
							start_migratetype);

			expand(zone, page, order, current_order, area,
			       is_migrate_cma(migratetype)
			     ? migratetype : start_migratetype);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype);

			return page;
		}
	}

	return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation.  goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, int cold)
{
	int mt = migratetype, i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order.  The page is added to the caller's
		 * list, and the list head then moves forward.  From the
		 * caller's perspective, the linked list is ordered by page
		 * number under some conditions.  This is useful for IO
		 * devices that can merge IO requests if the physical pages
		 * are ordered properly.
		 */
		if (likely(cold == 0))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		if (IS_ENABLED(CONFIG_CMA)) {
			mt = get_pageblock_migratetype(page);
			if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
				mt = migratetype;
		}
		set_freepage_migratetype(page, mt);
		list = &page->lru;
		if (is_migrate_cma(mt))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	if (to_drain > 0) {
		free_pcppages_bulk(zone, to_drain, pcp);
		pcp->count -= to_drain;
	}
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		local_irq_save(flags);
		pset = per_cpu_ptr(zone->pageset, cpu);

		pcp = &pset->pcp;
		if (pcp->count) {
			free_pcppages_bulk(zone, pcp->count, pcp);
			pcp->count = 0;
		}
		local_irq_restore(flags);
	}
}

1da177e4 1229
9f8f2172
CL
1230/*
1231 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1232 */
1233void drain_local_pages(void *arg)
1234{
1235 drain_pages(smp_processor_id());
1236}
1237
1238/*
74046494
GBY
1239 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1240 *
1241 * Note that this code is protected against sending an IPI to an offline
1242 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1243 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1244 * nothing keeps CPUs from showing up after we populated the cpumask and
1245 * before the call to on_each_cpu_mask().
9f8f2172
CL
1246 */
1247void drain_all_pages(void)
1248{
74046494
GBY
1249 int cpu;
1250 struct per_cpu_pageset *pcp;
1251 struct zone *zone;
1252
1253 /*
1254 * Allocate in the BSS so we wont require allocation in
1255 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1256 */
1257 static cpumask_t cpus_with_pcps;
1258
1259 /*
1260 * We don't care about racing with CPU hotplug event
1261 * as offline notification will cause the notified
1262 * cpu to drain that CPU pcps and on_each_cpu_mask
1263 * disables preemption as part of its processing
1264 */
1265 for_each_online_cpu(cpu) {
1266 bool has_pcps = false;
1267 for_each_populated_zone(zone) {
1268 pcp = per_cpu_ptr(zone->pageset, cpu);
1269 if (pcp->pcp.count) {
1270 has_pcps = true;
1271 break;
1272 }
1273 }
1274 if (has_pcps)
1275 cpumask_set_cpu(cpu, &cpus_with_pcps);
1276 else
1277 cpumask_clear_cpu(cpu, &cpus_with_pcps);
1278 }
1279 on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
9f8f2172
CL
1280}
1281
#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	int migratetype;

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pageblock_migratetype(page);
	set_freepage_migratetype(page, migratetype);
	local_irq_save(flags);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary.  Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	if (cold)
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	else
		list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pcppages_bulk(zone, pcp->batch, pcp);
		pcp->count -= pcp->batch;
	}

out:
	local_irq_restore(flags);
}

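/*
 * Example of the high/batch interaction above (hypothetical tunables):
 * with pcp->high == 90 and pcp->batch == 15, the free that pushes
 * pcp->count to 90 calls free_pcppages_bulk(zone, 15, pcp), returning
 * 15 pages to the buddy lists and leaving 75 cached per cpu.
 */
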
/*
 * Free a list of 0-order pages
 */
void free_hot_cold_page_list(struct list_head *list, int cold)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		trace_mm_page_free_batched(page, cold);
		free_hot_cold_page(page, cold);
	}
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}

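/*
 * Usage sketch (hypothetical caller, for illustration only): a driver
 * that obtained a block with page = alloc_pages(GFP_KERNEL, 2) can call
 * split_page(page, 2) and then release the four order-0 pages
 * independently with __free_page(page + i).
 */
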
static int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/* Obey watermarks as if the page was being allocated */
		watermark = low_wmark_pages(zone) + (1 << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);

	/* Set the pageblock if the isolated page is at least a pageblock */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}

/*
 * Similar to split_page except the page is already free.  As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	int nr_pages;

	order = page_order(page);

	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);
	return nr_pages;
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);

again:
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
		__mod_zone_freepage_state(zone, -(1 << order),
					  get_pageblock_migratetype(page));
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone, gfp_flags);
	local_irq_restore(flags);

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return false;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
				&fail_page_alloc.ignore_gfp_wait))
		goto fail;
	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				&fail_page_alloc.ignore_gfp_highmem))
		goto fail;
	if (!debugfs_create_u32("min-order", mode, dir,
				&fail_page_alloc.min_order))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return false;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

1da177e4 1625/*
88f5acf8 1626 * Return true if free pages are above 'mark'. This takes into account the order
1da177e4
LT
1627 * of the allocation.
1628 */
88f5acf8
MG
1629static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1630 int classzone_idx, int alloc_flags, long free_pages)
1da177e4
LT
1631{
1632 /* free_pages my go negative - that's OK */
d23ad423 1633 long min = mark;
2cfed075 1634 long lowmem_reserve = z->lowmem_reserve[classzone_idx];
1da177e4
LT
1635 int o;
1636
df0a6daa 1637 free_pages -= (1 << order) - 1;
7fb1d9fc 1638 if (alloc_flags & ALLOC_HIGH)
1da177e4 1639 min -= min / 2;
7fb1d9fc 1640 if (alloc_flags & ALLOC_HARDER)
1da177e4 1641 min -= min / 4;
d95ea5d1
BZ
1642#ifdef CONFIG_CMA
1643 /* If allocation can't use CMA areas don't use free CMA pages */
1644 if (!(alloc_flags & ALLOC_CMA))
1645 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
1646#endif
2cfed075 1647 if (free_pages <= min + lowmem_reserve)
88f5acf8 1648 return false;
1da177e4
LT
1649 for (o = 0; o < order; o++) {
1650 /* At the next order, this order's pages become unavailable */
1651 free_pages -= z->free_area[o].nr_free << o;
1652
1653 /* Require fewer higher order pages to be free */
1654 min >>= 1;
1655
1656 if (free_pages <= min)
88f5acf8 1657 return false;
1da177e4 1658 }
88f5acf8
MG
1659 return true;
1660}
1661
1662bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1663 int classzone_idx, int alloc_flags)
1664{
1665 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1666 zone_page_state(z, NR_FREE_PAGES));
1667}
1668
1669bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1670 int classzone_idx, int alloc_flags)
1671{
1672 long free_pages = zone_page_state(z, NR_FREE_PAGES);
1673
1674 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1675 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1676
1677 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1678 free_pages);
1da177e4
LT
1679}
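
/*
 * Worked example (userspace sketch, compiled out, not kernel code): a
 * self-contained mirror of the __zone_watermark_ok() loop above, with
 * lowmem_reserve and the ALLOC_HIGH/ALLOC_HARDER adjustments assumed to
 * be zero.  It shows how free pages at orders below the request are
 * discounted and how the required minimum is halved at each step.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool mock_watermark_ok(long free_pages, long min, int order,
			      const long *nr_free)
{
	int o;

	free_pages -= (1 << order) - 1;
	if (free_pages <= min)
		return false;
	for (o = 0; o < order; o++) {
		/* at the next order, this order's pages are unavailable */
		free_pages -= nr_free[o] << o;
		min >>= 1;	/* require fewer higher-order pages */
		if (free_pages <= min)
			return false;
	}
	return true;
}

int main(void)
{
	/* free blocks per order: 100 order-0, 10 order-1, 4 order-2 */
	const long nr_free[] = { 100, 10, 4 };
	long free_pages = 100 * 1 + 10 * 2 + 4 * 4;	/* 136 pages total */

	/* plenty of order-0 pages, but the order-2 check still fails */
	printf("order-2 ok: %s\n",
	       mock_watermark_ok(free_pages, 64, 2, nr_free) ? "yes" : "no");
	return 0;
}
#endif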

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 *  1) Check that the zone isn't thought to be full (doesn't have its
 *     bit set in the zonelist_cache fullzones BITMAP).
 *  2) Check that the zone's node (obtained from the zonelist_cache
 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->_zonerefs;

	set_bit(i, zlc->fullzones);
}

/*
 * clear all zones full, called after direct reclaim makes progress so that
 * a zone that was recently full is not skipped over for up to a second
 */
static void zlc_clear_zones_full(struct zonelist *zonelist)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
}

static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
}

static void __paginginit init_zone_allows_reclaim(int nid)
{
	int i;

	for_each_online_node(i)
		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
			node_set(i, NODE_DATA(nid)->reclaim_nodes);
		else
			zone_reclaim_mode = 1;
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}

static void zlc_clear_zones_full(struct zonelist *zonelist)
{
}

static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return true;
}

static inline void init_zone_allows_reclaim(int nid)
{
}
#endif	/* CONFIG_NUMA */
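
/*
 * Illustrative sketch (userspace, compiled out, hypothetical distance
 * matrix): which nodes init_zone_allows_reclaim() above would record as
 * reclaim targets for node 0.  Assumes a RECLAIM_DISTANCE of 30, as on
 * common NUMA platforms; any node farther away than that flips
 * zone_reclaim_mode on instead of being added to the mask.
 */
#if 0
#include <stdio.h>

#define MOCK_RECLAIM_DISTANCE	30

static const int mock_distance[4][4] = {
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

int main(void)
{
	int nid = 0, i;

	for (i = 0; i < 4; i++) {
		if (mock_distance[nid][i] <= MOCK_RECLAIM_DISTANCE)
			printf("node %d: direct reclaim target\n", i);
		else
			printf("node %d: too far, zone_reclaim_mode = 1\n", i);
	}
	return 0;
}
#endif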

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int migratetype)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free pages.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				continue;
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a zone that is within its dirty
		 * limit, such that no single zone holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the zone's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * This may look like it could increase pressure on
		 * lower zones by failing allocations in higher zones
		 * before they are full.  But the pages that do spill
		 * over are limited as the lower zones are protected
		 * by this very same mechanism.  It should not become
		 * a practical burden to them.
		 *
		 * XXX: For now, allow allocations to potentially
		 * exceed the per-zone dirty limit in the slowpath
		 * (ALLOC_WMARK_LOW unset) before going into reclaim,
		 * which is important when on a NUMA setup the allowed
		 * zones are together not big enough to reach the
		 * global limit.  The proper fix for these situations
		 * will require awareness of zones in the
		 * dirty-throttling and the flusher threads.
		 */
		if ((alloc_flags & ALLOC_WMARK_LOW) &&
		    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
			goto this_zone_full;

		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			int ret;

			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
			if (zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags))
				goto try_this_zone;

			if (IS_ENABLED(CONFIG_NUMA) &&
					!did_zlc_setup && nr_online_nodes > 1) {
				/*
				 * we do zlc_setup if there are multiple nodes
				 * and before considering the first zone allowed
				 * by the cpuset.
				 */
				allowednodes = zlc_setup(zonelist, alloc_flags);
				zlc_active = 1;
				did_zlc_setup = 1;
			}

			if (zone_reclaim_mode == 0 ||
			    !zone_allows_reclaim(preferred_zone, zone))
				goto this_zone_full;

			/*
			 * As we may have just activated ZLC, check if the first
			 * eligible zone has failed zone_reclaim recently.
			 */
			if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
				!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;

			ret = zone_reclaim(zone, gfp_mask, order);
			switch (ret) {
			case ZONE_RECLAIM_NOSCAN:
				/* did not scan */
				continue;
			case ZONE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				continue;
			default:
				/* did we reclaim enough */
				if (!zone_watermark_ok(zone, order, mark,
						classzone_idx, alloc_flags))
					goto this_zone_full;
			}
		}

try_this_zone:
		page = buffered_rmqueue(preferred_zone, zone, order,
						gfp_mask, migratetype);
		if (page)
			break;
this_zone_full:
		if (IS_ENABLED(CONFIG_NUMA))
			zlc_mark_zone_full(zonelist, z);
	}

	if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}

	if (page)
		/*
		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
		 * necessary to allocate the page. The expectation is
		 * that the caller is taking steps that will free more
		 * memory. The caller should avoid the page being used
		 * for !PFMEMALLOC purposes.
		 */
		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);

	return page;
}

/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
static inline bool should_suppress_show_mem(void)
{
	bool ret = false;

#if NODES_SHIFT > 8
	ret = in_interrupt();
#endif
	return ret;
}

static DEFINE_RATELIMIT_STATE(nopage_rs,
		DEFAULT_RATELIMIT_INTERVAL,
		DEFAULT_RATELIMIT_BURST);

void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
{
	unsigned int filter = SHOW_MEM_FILTER_NODES;

	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
	    debug_guardpage_minorder() > 0)
		return;

	/*
	 * This documents exceptions given to allocations in certain
	 * contexts that are allowed to allocate outside current's set
	 * of allowed nodes.
	 */
	if (!(gfp_mask & __GFP_NOMEMALLOC))
		if (test_thread_flag(TIF_MEMDIE) ||
		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
			filter &= ~SHOW_MEM_FILTER_NODES;
	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
		filter &= ~SHOW_MEM_FILTER_NODES;

	if (fmt) {
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		pr_warn("%pV", &vaf);

		va_end(args);
	}

	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
		current->comm, order, gfp_mask);

	dump_stack();
	if (!should_suppress_show_mem())
		show_mem(filter);
}

static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
				unsigned long did_some_progress,
				unsigned long pages_reclaimed)
{
	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		return 0;

	/* Always retry if specifically requested */
	if (gfp_mask & __GFP_NOFAIL)
		return 1;

	/*
	 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
	 * making forward progress without invoking OOM. Suspend also disables
	 * storage devices so kswapd will not help. Bail if we are suspending.
	 */
	if (!did_some_progress && pm_suspended_storage())
		return 0;

	/*
	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
	 * means __GFP_NOFAIL, but that may not be true in other
	 * implementations.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return 1;

	/*
	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
	 * specified, then we retry until we no longer reclaim any pages
	 * (above), or we've reclaimed an order of pages at least as
	 * large as the allocation's order. In both cases, if the
	 * allocation still fails, we stop retrying.
	 */
	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
		return 1;

	return 0;
}
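
/*
 * Userspace mirror (illustrative only, compiled out) of the retry
 * policy above: the gfp bits, the allocation order and the amount of
 * reclaim progress decide whether the slowpath loops again.  The
 * boolean flags stand in for the corresponding gfp bits; the cutoff of
 * 3 matches the usual value of PAGE_ALLOC_COSTLY_ORDER.
 */
#if 0
#include <stdbool.h>

#define MOCK_COSTLY_ORDER	3

static bool mock_should_retry(bool noretry, bool nofail, bool repeat,
			      bool suspended, unsigned int order,
			      unsigned long progress,
			      unsigned long pages_reclaimed)
{
	if (noretry)
		return false;		/* __GFP_NORETRY: never loop */
	if (nofail)
		return true;		/* __GFP_NOFAIL: always loop */
	if (!progress && suspended)
		return false;		/* storage is off, reclaim can't help */
	if (order <= MOCK_COSTLY_ORDER)
		return true;		/* cheap orders keep retrying */
	if (repeat && pages_reclaimed < (1UL << order))
		return true;		/* __GFP_REPEAT: not enough reclaimed yet */
	return false;
}
#endif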

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	/* Acquire the OOM killer lock for the zones in zonelist */
	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure.
	 */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
		order, zonelist, high_zoneidx,
		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
		preferred_zone, migratetype);
	if (page)
		goto out;

	if (!(gfp_mask & __GFP_NOFAIL)) {
		/* The OOM killer will not help higher order allocs */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			goto out;
		/* The OOM killer does not needlessly kill tasks for lowmem */
		if (high_zoneidx < ZONE_NORMAL)
			goto out;
		/*
		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
		 * The caller should handle page allocation failure by itself if
		 * it specifies __GFP_THISNODE.
		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
		 */
		if (gfp_mask & __GFP_THISNODE)
			goto out;
	}
	/* Exhausted what can be done so it's blamo time */
	out_of_memory(zonelist, gfp_mask, order, nodemask, false);

out:
	clear_zonelist_oom(zonelist, gfp_mask);
	return page;
}

#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, bool sync_migration,
	bool *contended_compaction, bool *deferred_compaction,
	unsigned long *did_some_progress)
{
	if (!order)
		return NULL;

	if (compaction_deferred(preferred_zone, order)) {
		*deferred_compaction = true;
		return NULL;
	}

	current->flags |= PF_MEMALLOC;
	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
						nodemask, sync_migration,
						contended_compaction);
	current->flags &= ~PF_MEMALLOC;

	if (*did_some_progress != COMPACT_SKIPPED) {
		struct page *page;

		/* Page migration frees to the PCP lists but we want merging */
		drain_pages(get_cpu());
		put_cpu();

		page = get_page_from_freelist(gfp_mask, nodemask,
				order, zonelist, high_zoneidx,
				alloc_flags & ~ALLOC_NO_WATERMARKS,
				preferred_zone, migratetype);
		if (page) {
			preferred_zone->compact_blockskip_flush = false;
			preferred_zone->compact_considered = 0;
			preferred_zone->compact_defer_shift = 0;
			if (order >= preferred_zone->compact_order_failed)
				preferred_zone->compact_order_failed = order + 1;
			count_vm_event(COMPACTSUCCESS);
			return page;
		}

		/*
		 * It's bad if a compaction run occurs and fails.
		 * The most likely reason is that pages exist,
		 * but not enough to satisfy watermarks.
		 */
		count_vm_event(COMPACTFAIL);

		/*
		 * As async compaction considers a subset of pageblocks, only
		 * defer if the failure was a sync compaction failure.
		 */
		if (sync_migration)
			defer_compaction(preferred_zone, order);

		cond_resched();
	}

	return NULL;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, bool sync_migration,
	bool *contended_compaction, bool *deferred_compaction,
	unsigned long *did_some_progress)
{
	return NULL;
}
#endif /* CONFIG_COMPACTION */

/* Perform direct synchronous page reclaim */
static int
__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
		  nodemask_t *nodemask)
{
	struct reclaim_state reclaim_state;
	int progress;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	current->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	current->reclaim_state = &reclaim_state;

	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

	current->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	current->flags &= ~PF_MEMALLOC;

	cond_resched();

	return progress;
}

/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress)
{
	struct page *page = NULL;
	bool drained = false;

	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
					       nodemask);
	if (unlikely(!(*did_some_progress)))
		return NULL;

	/* After successful reclaim, reconsider all zones for allocation */
	if (IS_ENABLED(CONFIG_NUMA))
		zlc_clear_zones_full(zonelist);

retry:
	page = get_page_from_freelist(gfp_mask, nodemask, order,
					zonelist, high_zoneidx,
					alloc_flags & ~ALLOC_NO_WATERMARKS,
					preferred_zone, migratetype);

	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists. Drain them and try again
	 */
	if (!page && !drained) {
		drain_all_pages();
		drained = true;
		goto retry;
	}

	return page;
}

/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	do {
		page = get_page_from_freelist(gfp_mask, nodemask, order,
			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);

		if (!page && gfp_mask & __GFP_NOFAIL)
			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
}

static inline
void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
						enum zone_type high_zoneidx,
						enum zone_type classzone_idx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order, classzone_idx);
}

static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
	const gfp_t wait = gfp_mask & __GFP_WAIT;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

	if (!wait) {
		/*
		 * Not worth trying to allocate harder for
		 * __GFP_NOMEMALLOC even if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (gfp_mask & __GFP_MEMALLOC)
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (!in_interrupt() &&
				((current->flags & PF_MEMALLOC) ||
				 unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}
#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}
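
/*
 * Illustrative mapping (userspace sketch, compiled out, simplified):
 * how the gfp bits translate into alloc_flags above for two common
 * cases.  The MK_* flag values are hypothetical; only the relationships
 * mirror the code.  GFP_KERNEL can wait, so it keeps the cpuset
 * restriction; GFP_ATOMIC cannot, so it digs harder and ignores it.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define MK_WMARK_MIN	0x01
#define MK_HARDER	0x02
#define MK_HIGH		0x04
#define MK_CPUSET	0x08

static int mock_gfp_to_alloc_flags(bool can_wait, bool high,
				   bool nomemalloc, bool rt_task)
{
	int flags = MK_WMARK_MIN | MK_CPUSET;

	if (high)
		flags |= MK_HIGH;
	if (!can_wait) {
		if (!nomemalloc)
			flags |= MK_HARDER;
		flags &= ~MK_CPUSET;	/* don't fail atomic allocs on cpuset */
	} else if (rt_task)
		flags |= MK_HARDER;
	return flags;
}

int main(void)
{
	printf("GFP_KERNEL-like: %#x\n",
	       mock_gfp_to_alloc_flags(true, false, false, false));
	printf("GFP_ATOMIC-like: %#x\n",
	       mock_gfp_to_alloc_flags(false, true, false, false));
	return 0;
}
#endif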

bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
}

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct page *page = NULL;
	int alloc_flags;
	unsigned long pages_reclaimed = 0;
	unsigned long did_some_progress;
	bool sync_migration = false;
	bool deferred_compaction = false;
	bool contended_compaction = false;

	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
	if (order >= MAX_ORDER) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
	 * using a larger set of nodes after it has established that the
	 * allowed per node queues are empty and that nodes are
	 * over allocated.
	 */
	if (IS_ENABLED(CONFIG_NUMA) &&
			(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
		goto nopage;

restart:
	if (!(gfp_mask & __GFP_NO_KSWAPD))
		wake_all_kswapd(order, zonelist, high_zoneidx,
						zone_idx(preferred_zone));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);

	/*
	 * Find the true preferred zone if the allocation is unconstrained by
	 * cpusets.
	 */
	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
		first_zones_zonelist(zonelist, high_zoneidx, NULL,
					&preferred_zone);

rebalance:
	/* This is the last chance, in general, before the goto nopage. */
	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);
	if (page)
		goto got_pg;

	/* Allocate without watermarks if the context allows */
	if (alloc_flags & ALLOC_NO_WATERMARKS) {
		/*
		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
		 * the allocation is high priority and these type of
		 * allocations are system rather than user orientated
		 */
		zonelist = node_zonelist(numa_node_id(), gfp_mask);

		page = __alloc_pages_high_priority(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
		if (page) {
			goto got_pg;
		}
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC)
		goto nopage;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
		goto nopage;

	/*
	 * Try direct compaction. The first pass is asynchronous. Subsequent
	 * attempts after direct reclaim are synchronous
	 */
	page = __alloc_pages_direct_compact(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, sync_migration,
					&contended_compaction,
					&deferred_compaction,
					&did_some_progress);
	if (page)
		goto got_pg;
	sync_migration = true;

	/*
	 * If compaction is deferred for high-order allocations, it is because
	 * sync compaction recently failed. If this is the case and the caller
	 * requested a movable allocation that does not heavily disrupt the
	 * system then fail the allocation instead of entering direct reclaim.
	 */
	if ((deferred_compaction || contended_compaction) &&
						(gfp_mask & __GFP_NO_KSWAPD))
		goto nopage;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, &did_some_progress);
	if (page)
		goto got_pg;

	/*
	 * If we failed to make any progress reclaiming, then we are
	 * running out of options and have to consider going OOM
	 */
	if (!did_some_progress) {
		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
			if (oom_killer_disabled)
				goto nopage;
			/* Coredumps can quickly deplete all memory reserves */
			if ((current->flags & PF_DUMPCORE) &&
			    !(gfp_mask & __GFP_NOFAIL))
				goto nopage;
			page = __alloc_pages_may_oom(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask, preferred_zone,
					migratetype);
			if (page)
				goto got_pg;

			if (!(gfp_mask & __GFP_NOFAIL)) {
				/*
				 * The oom killer is not called for high-order
				 * allocations that may fail, so if no progress
				 * is being made, there are no other options and
				 * retrying is unlikely to help.
				 */
				if (order > PAGE_ALLOC_COSTLY_ORDER)
					goto nopage;
				/*
				 * The oom killer is not called for lowmem
				 * allocations to prevent needlessly killing
				 * innocent tasks.
				 */
				if (high_zoneidx < ZONE_NORMAL)
					goto nopage;
			}

			goto restart;
		}
	}

	/* Check if we should retry the allocation */
	pages_reclaimed += did_some_progress;
	if (should_alloc_retry(gfp_mask, order, did_some_progress,
						pages_reclaimed)) {
		/* Wait for some write requests to complete then retry */
		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
		goto rebalance;
	} else {
		/*
		 * High-order allocations do not necessarily loop after
		 * direct reclaim and reclaim/compaction depends on compaction
		 * being called after reclaim so call directly if necessary
		 */
		page = __alloc_pages_direct_compact(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, sync_migration,
					&contended_compaction,
					&deferred_compaction,
					&did_some_progress);
		if (page)
			goto got_pg;
	}

nopage:
	warn_alloc_failed(gfp_mask, order, NULL);
	return page;
got_pg:
	if (kmemcheck_enabled)
		kmemcheck_pagealloc_alloc(page, order, gfp_mask);

	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zone *preferred_zone;
	struct page *page = NULL;
	int migratetype = allocflags_to_migratetype(gfp_mask);
	unsigned int cpuset_mems_cookie;
	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
	struct mem_cgroup *memcg = NULL;

	gfp_mask &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp_mask);

	might_sleep_if(gfp_mask & __GFP_WAIT);

	if (should_fail_alloc_page(gfp_mask, order))
		return NULL;

	/*
	 * Check the zones suitable for the gfp_mask contain at least one
	 * valid zone. It's possible to have an empty zonelist as a result
	 * of GFP_THISNODE and a memoryless node
	 */
	if (unlikely(!zonelist->_zonerefs->zone))
		return NULL;

	/*
	 * Will only have any effect when __GFP_KMEMCG is set.  This is
	 * verified in the (always inline) callee
	 */
	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
		return NULL;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();

	/* The preferred zone is used for statistics later */
	first_zones_zonelist(zonelist, high_zoneidx,
				nodemask ? : &cpuset_current_mems_allowed,
				&preferred_zone);
	if (!preferred_zone)
		goto out;

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* First allocation attempt */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
			zonelist, high_zoneidx, alloc_flags,
			preferred_zone, migratetype);
	if (unlikely(!page)) {
		/*
		 * Runtime PM, block IO and its error handling path
		 * can deadlock because I/O on the device might not
		 * complete.
		 */
		gfp_mask = memalloc_noio_flags(gfp_mask);
		page = __alloc_pages_slowpath(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
	}

	trace_mm_page_alloc(page, order, gfp_mask, migratetype);

out:
	/*
	 * When updating a task's mems_allowed, it is possible to race with
	 * parallel threads in such a way that an allocation can fail while
	 * the mask is being updated. If a page allocation is about to fail,
	 * check if the cpuset changed during allocation and if so, retry.
	 */
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;

	memcg_kmem_commit_charge(page, memcg, order);

	return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
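
/*
 * Call-site sketch (hypothetical module code, compiled out, not part of
 * this file): alloc_pages() and the helpers below all funnel into
 * __alloc_pages_nodemask().  A typical order-1 allocation and release
 * pairs alloc_pages() with __free_pages():
 */
#if 0
static void *example_grab_two_pages(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 1);	/* 2 contiguous pages */

	if (!page)
		return NULL;	/* low-order GFP_KERNEL rarely fails, but can */
	return page_address(page);
}

static void example_drop_two_pages(void *addr)
{
	__free_pages(virt_to_page(addr), 1);
}
#endif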

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_cold_page(page, 0);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);
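
/*
 * Usage sketch (hypothetical caller, compiled out): the address-based
 * variants above pair with free_pages()/free_page() rather than with
 * __free_pages().  Note that free_pages() tolerates a 0 address, as the
 * check in the function above shows.
 */
#if 0
static int example_use_free_pages(void)
{
	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);	/* 4 pages */
	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);

	if (!buf || !zeroed) {
		free_pages(buf, 2);	/* free_pages(0, ...) is a no-op */
		free_page(zeroed);
		return -ENOMEM;
	}
	/* ... use the buffers ... */
	free_pages(buf, 2);
	free_page(zeroed);
	return 0;
}
#endif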

/*
 * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
 * pages allocated with __GFP_KMEMCG.
 *
 * Those pages are accounted to a particular memcg, embedded in the
 * corresponding page_cgroup. To avoid adding a hit in the allocator to search
 * for that information only to find out that it is NULL for users who have no
 * interest in that whatsoever, we provide these functions.
 *
 * The caller knows better which flags it relies on.
 */
void __free_memcg_kmem_pages(struct page *page, unsigned int order)
{
	memcg_kmem_uncharge_pages(page, order);
	__free_pages(page, order);
}

void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_memcg_kmem_pages(virt_to_page((void *)addr), order);
	}
}

static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
{
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
 * back.
 * Note this is not alloc_pages_exact_node() which allocates on a specific node,
 * but is not exact.
 */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
	unsigned order = get_order(size);
	struct page *p = alloc_pages_node(nid, gfp_mask, order);
	if (!p)
		return NULL;
	return make_alloc_exact((unsigned long)page_address(p), order, size);
}
EXPORT_SYMBOL(alloc_pages_exact_nid);

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);
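
/*
 * Usage sketch (hypothetical caller, compiled out): alloc_pages_exact()
 * trims the rounded-up power-of-two allocation via make_alloc_exact(),
 * so a 3-page request really consumes 3 pages instead of 4.  It must be
 * paired with free_pages_exact() using the same size.
 */
#if 0
static void *example_alloc_three_pages(void)
{
	return alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
}

static void example_free_three_pages(void *buf)
{
	if (buf)
		free_pages_exact(buf, 3 * PAGE_SIZE);
}
#endif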

/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index.  For each
 * zone, the number of pages is calculated as:
 *     present_pages - high_pages
 */
static unsigned long nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned long sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->managed_pages;
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the high
 * watermark within ZONE_DMA and ZONE_NORMAL.
 */
unsigned long nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

/**
 * nr_free_pagecache_pages - count number of pages beyond high watermark
 *
 * nr_free_pagecache_pages() counts the number of pages which are beyond the
 * high watermark within all zones.
 */
unsigned long nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}

static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = global_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
bool skip_free_areas_node(unsigned int flags, int nid)
{
	bool ret = false;
	unsigned int cpuset_mems_cookie;

	if (!(flags & SHOW_MEM_FILTER_NODES))
		goto out;

	do {
		cpuset_mems_cookie = get_mems_allowed();
		ret = !node_isset(nid, cpuset_current_mems_allowed);
	} while (!put_mems_allowed(cpuset_mems_cookie));
out:
	return ret;
}

#define K(x) ((x) << (PAGE_SHIFT-10))

static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RESERVE]	= 'R',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk("(%s) ", tmp);
}

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 * Suppresses nodes that are not allowed by current's cpuset if
 * SHOW_MEM_FILTER_NODES is passed.
 */
void show_free_areas(unsigned int filter)
{
	int cpu;
	struct zone *zone;

	for_each_populated_zone(zone) {
		if (skip_free_areas_node(filter, zone_to_nid(zone)))
			continue;
		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = per_cpu_ptr(zone->pageset, cpu);

			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp.high,
			       pageset->pcp.batch, pageset->pcp.count);
		}
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu"
		" dirty:%lu writeback:%lu unstable:%lu\n"
		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
		" free_cma:%lu\n",
		global_page_state(NR_ACTIVE_ANON),
		global_page_state(NR_INACTIVE_ANON),
		global_page_state(NR_ISOLATED_ANON),
		global_page_state(NR_ACTIVE_FILE),
		global_page_state(NR_INACTIVE_FILE),
		global_page_state(NR_ISOLATED_FILE),
		global_page_state(NR_UNEVICTABLE),
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_FREE_PAGES),
		global_page_state(NR_SLAB_RECLAIMABLE),
		global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_SHMEM),
		global_page_state(NR_PAGETABLE),
		global_page_state(NR_BOUNCE),
		global_page_state(NR_FREE_CMA_PAGES));

	for_each_populated_zone(zone) {
		int i;

		if (skip_free_areas_node(filter, zone_to_nid(zone)))
			continue;
		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" mapped:%lukB"
			" shmem:%lukB"
			" slab_reclaimable:%lukB"
			" slab_unreclaimable:%lukB"
			" kernel_stack:%lukB"
			" pagetables:%lukB"
			" unstable:%lukB"
			" bounce:%lukB"
			" free_cma:%lukB"
			" writeback_tmp:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone_page_state(zone, NR_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ISOLATED_ANON)),
			K(zone_page_state(zone, NR_ISOLATED_FILE)),
			K(zone->present_pages),
			K(zone->managed_pages),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_FILE_DIRTY)),
			K(zone_page_state(zone, NR_WRITEBACK)),
			K(zone_page_state(zone, NR_FILE_MAPPED)),
			K(zone_page_state(zone, NR_SHMEM)),
			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
			zone_page_state(zone, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
			K(zone_page_state(zone, NR_PAGETABLE)),
			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_populated_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;
		unsigned char types[MAX_ORDER];

		if (skip_free_areas_node(filter, zone_to_nid(zone)))
			continue;
		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!list_empty(&area->free_list[type]))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk("= %lukB\n", K(total));
	}

	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}
3125
1da177e4
LT
3126/*
3127 * Builds allocation fallback zone lists.
1a93205b
CL
3128 *
3129 * Add all populated zones of a node to the zonelist.
1da177e4 3130 */
f0c0b2b8
KH
3131static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3132 int nr_zones, enum zone_type zone_type)
1da177e4 3133{
1a93205b
CL
3134 struct zone *zone;
3135
98d2b0eb 3136 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 3137 zone_type++;
02a68a5e
CL
3138
3139 do {
2f6726e5 3140 zone_type--;
070f8032 3141 zone = pgdat->node_zones + zone_type;
1a93205b 3142 if (populated_zone(zone)) {
dd1a239f
MG
3143 zoneref_set_zone(zone,
3144 &zonelist->_zonerefs[nr_zones++]);
070f8032 3145 check_highest_zone(zone_type);
1da177e4 3146 }
02a68a5e 3147
2f6726e5 3148 } while (zone_type);
070f8032 3149 return nr_zones;
1da177e4
LT
3150}
3151
f0c0b2b8
KH
3152
3153/*
3154 * zonelist_order:
3155 * 0 = automatic detection of better ordering.
3156 * 1 = order by ([node] distance, -zonetype)
3157 * 2 = order by (-zonetype, [node] distance)
3158 *
3159 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3160 * the same zonelist. So only NUMA can configure this param.
3161 */
3162#define ZONELIST_ORDER_DEFAULT 0
3163#define ZONELIST_ORDER_NODE 1
3164#define ZONELIST_ORDER_ZONE 2
3165
3166/* zonelist order in the kernel.
3167 * set_zonelist_order() will set this to NODE or ZONE.
3168 */
3169static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3170static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3171
3172
1da177e4 3173#ifdef CONFIG_NUMA
f0c0b2b8
KH
3174/* The value user specified ....changed by config */
3175static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3176/* string for sysctl */
3177#define NUMA_ZONELIST_ORDER_LEN 16
3178char numa_zonelist_order[16] = "default";
3179
3180/*
3181 * interface for configure zonelist ordering.
3182 * command line option "numa_zonelist_order"
3183 * = "[dD]efault - default, automatic configuration.
3184 * = "[nN]ode - order by node locality, then by zone within node
3185 * = "[zZ]one - order by zone, then by locality within zone
3186 */
3187
3188static int __parse_numa_zonelist_order(char *s)
3189{
3190 if (*s == 'd' || *s == 'D') {
3191 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3192 } else if (*s == 'n' || *s == 'N') {
3193 user_zonelist_order = ZONELIST_ORDER_NODE;
3194 } else if (*s == 'z' || *s == 'Z') {
3195 user_zonelist_order = ZONELIST_ORDER_ZONE;
3196 } else {
3197 printk(KERN_WARNING
3198 "Ignoring invalid numa_zonelist_order value: "
3199 "%s\n", s);
3200 return -EINVAL;
3201 }
3202 return 0;
3203}
3204
3205static __init int setup_numa_zonelist_order(char *s)
3206{
ecb256f8
VL
3207 int ret;
3208
3209 if (!s)
3210 return 0;
3211
3212 ret = __parse_numa_zonelist_order(s);
3213 if (ret == 0)
3214 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3215
3216 return ret;
f0c0b2b8
KH
3217}
3218early_param("numa_zonelist_order", setup_numa_zonelist_order);
3219
3220/*
3221 * sysctl handler for numa_zonelist_order
3222 */
3223int numa_zonelist_order_handler(ctl_table *table, int write,
8d65af78 3224 void __user *buffer, size_t *length,
f0c0b2b8
KH
3225 loff_t *ppos)
3226{
3227 char saved_string[NUMA_ZONELIST_ORDER_LEN];
3228 int ret;
443c6f14 3229 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 3230
443c6f14 3231 mutex_lock(&zl_order_mutex);
f0c0b2b8 3232 if (write)
443c6f14 3233 strcpy(saved_string, (char*)table->data);
8d65af78 3234 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 3235 if (ret)
443c6f14 3236 goto out;
f0c0b2b8
KH
3237 if (write) {
3238 int oldval = user_zonelist_order;
3239 if (__parse_numa_zonelist_order((char*)table->data)) {
3240 /*
3241 * bogus value. restore saved string
3242 */
3243 strncpy((char*)table->data, saved_string,
3244 NUMA_ZONELIST_ORDER_LEN);
3245 user_zonelist_order = oldval;
4eaf3f64
HL
3246 } else if (oldval != user_zonelist_order) {
3247 mutex_lock(&zonelists_mutex);
9adb62a5 3248 build_all_zonelists(NULL, NULL);
4eaf3f64
HL
3249 mutex_unlock(&zonelists_mutex);
3250 }
f0c0b2b8 3251 }
443c6f14
AK
3252out:
3253 mutex_unlock(&zl_order_mutex);
3254 return ret;
f0c0b2b8
KH
3255}
3256
3257
62bc62a8 3258#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
3259static int node_load[MAX_NUMNODES];
3260
1da177e4 3261/**
4dc3b16b 3262 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
3263 * @node: node whose fallback list we're appending
3264 * @used_node_mask: nodemask_t of already used nodes
3265 *
 3266 * We use a number of factors to determine which is the next node that should
 3267 * appear on a given node's fallback list. The node should not have appeared
 3268 * already in @node's fallback list, and it should be the next closest node
 3269 * according to the distance array (which contains arbitrary distance values
 3270 * from each node to each node in the system). We also prefer nodes with
 3271 * no CPUs, since presumably they'll have very little allocation pressure
 3272 * on them otherwise.
 3273 * It returns -1 (NUMA_NO_NODE) if no suitable node is found.
3274 */
f0c0b2b8 3275static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 3276{
4cf808eb 3277 int n, val;
1da177e4 3278 int min_val = INT_MAX;
00ef2d2f 3279 int best_node = NUMA_NO_NODE;
a70f7302 3280 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 3281
4cf808eb
LT
3282 /* Use the local node if we haven't already */
3283 if (!node_isset(node, *used_node_mask)) {
3284 node_set(node, *used_node_mask);
3285 return node;
3286 }
1da177e4 3287
4b0ef1fe 3288 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
3289
3290 /* Don't want a node to appear more than once */
3291 if (node_isset(n, *used_node_mask))
3292 continue;
3293
1da177e4
LT
3294 /* Use the distance array to find the distance */
3295 val = node_distance(node, n);
3296
4cf808eb
LT
3297 /* Penalize nodes under us ("prefer the next node") */
3298 val += (n < node);
3299
1da177e4 3300 /* Give preference to headless and unused nodes */
a70f7302
RR
3301 tmp = cpumask_of_node(n);
3302 if (!cpumask_empty(tmp))
1da177e4
LT
3303 val += PENALTY_FOR_NODE_WITH_CPUS;
3304
3305 /* Slight preference for less loaded node */
3306 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3307 val += node_load[n];
3308
3309 if (val < min_val) {
3310 min_val = val;
3311 best_node = n;
3312 }
3313 }
3314
3315 if (best_node >= 0)
3316 node_set(best_node, *used_node_mask);
3317
3318 return best_node;
3319}
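/*
 * Worked example (illustrative; assumes PENALTY_FOR_NODE_WITH_CPUS == 1,
 * its common default, and nr_online_nodes == MAX_NUMNODES == 4): node 0
 * choosing between node 1 (distance 20, has CPUs, node_load 2) and
 * node 2 (distance 20, headless, node_load 0) computes
 *     node 1: (20 + 0 + 1) * (4 * 4) + 2 = 338
 *     node 2: (20 + 0 + 0) * (4 * 4) + 0 = 320
 * so the headless node wins; scaling by MAX_NODE_LOAD * MAX_NUMNODES
 * before adding node_load keeps the load a mere tie-breaker.
 */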
3320
f0c0b2b8
KH
3321
3322/*
3323 * Build zonelists ordered by node and zones within node.
3324 * This results in maximum locality--normal zone overflows into local
3325 * DMA zone, if any--but risks exhausting DMA zone.
3326 */
3327static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 3328{
f0c0b2b8 3329 int j;
1da177e4 3330 struct zonelist *zonelist;
f0c0b2b8 3331
54a6eb5c 3332 zonelist = &pgdat->node_zonelists[0];
dd1a239f 3333 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c
MG
3334 ;
3335 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3336 MAX_NR_ZONES - 1);
dd1a239f
MG
3337 zonelist->_zonerefs[j].zone = NULL;
3338 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
3339}
3340
523b9458
CL
3341/*
3342 * Build gfp_thisnode zonelists
3343 */
3344static void build_thisnode_zonelists(pg_data_t *pgdat)
3345{
523b9458
CL
3346 int j;
3347 struct zonelist *zonelist;
3348
54a6eb5c
MG
3349 zonelist = &pgdat->node_zonelists[1];
3350 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
dd1a239f
MG
3351 zonelist->_zonerefs[j].zone = NULL;
3352 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
3353}
3354
f0c0b2b8
KH
3355/*
3356 * Build zonelists ordered by zone and nodes within zones.
3357 * This results in conserving DMA zone[s] until all Normal memory is
3358 * exhausted, but results in overflowing to remote node while memory
3359 * may still exist in local DMA zone.
3360 */
3361static int node_order[MAX_NUMNODES];
3362
3363static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3364{
f0c0b2b8
KH
3365 int pos, j, node;
3366 int zone_type; /* needs to be signed */
3367 struct zone *z;
3368 struct zonelist *zonelist;
3369
54a6eb5c
MG
3370 zonelist = &pgdat->node_zonelists[0];
3371 pos = 0;
3372 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3373 for (j = 0; j < nr_nodes; j++) {
3374 node = node_order[j];
3375 z = &NODE_DATA(node)->node_zones[zone_type];
3376 if (populated_zone(z)) {
dd1a239f
MG
3377 zoneref_set_zone(z,
3378 &zonelist->_zonerefs[pos++]);
54a6eb5c 3379 check_highest_zone(zone_type);
f0c0b2b8
KH
3380 }
3381 }
f0c0b2b8 3382 }
dd1a239f
MG
3383 zonelist->_zonerefs[pos].zone = NULL;
3384 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
3385}
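/*
 * Illustration (hypothetical two-node layout, not from the original
 * source): with node 0 = { Normal, DMA } and node 1 = { Normal },
 * node order yields  Normal(0), DMA(0), Normal(1)  whereas zone order
 * yields  Normal(0), Normal(1), DMA(0) -- all Normal memory, local and
 * remote, is consumed before the DMA zone is touched.
 */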
3386
3387static int default_zonelist_order(void)
3388{
3389 int nid, zone_type;
3390 unsigned long low_kmem_size,total_size;
3391 struct zone *z;
3392 int average_size;
3393 /*
88393161 3394 * ZONE_DMA and ZONE_DMA32 can be a very small area of the system.
f0c0b2b8
KH
3395 * If they are really small and used heavily, the system can fall
3396 * into OOM very easily.
e325c90f 3397 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
f0c0b2b8
KH
3398 */
 3399 /* Is there ZONE_NORMAL? (e.g. ppc has only a DMA zone) */
3400 low_kmem_size = 0;
3401 total_size = 0;
3402 for_each_online_node(nid) {
3403 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3404 z = &NODE_DATA(nid)->node_zones[zone_type];
3405 if (populated_zone(z)) {
3406 if (zone_type < ZONE_NORMAL)
3407 low_kmem_size += z->present_pages;
3408 total_size += z->present_pages;
e325c90f
DR
3409 } else if (zone_type == ZONE_NORMAL) {
3410 /*
3411 * If any node has only lowmem, then node order
3412 * is preferred to allow kernel allocations
3413 * locally; otherwise, they can easily infringe
3414 * on other nodes when there is an abundance of
3415 * lowmem available to allocate from.
3416 */
3417 return ZONELIST_ORDER_NODE;
f0c0b2b8
KH
3418 }
3419 }
3420 }
 3421 if (!low_kmem_size || /* there is no DMA area. */
3422 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
3423 return ZONELIST_ORDER_NODE;
3424 /*
 3425 * Look into each node's config.
 3426 * If there is a node where DMA/DMA32 memory makes up a very large
 3427 * share of its local memory, NODE_ORDER may be suitable.
3428 */
37b07e41 3429 average_size = total_size /
4b0ef1fe 3430 (nodes_weight(node_states[N_MEMORY]) + 1);
f0c0b2b8
KH
3431 for_each_online_node(nid) {
3432 low_kmem_size = 0;
3433 total_size = 0;
3434 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3435 z = &NODE_DATA(nid)->node_zones[zone_type];
3436 if (populated_zone(z)) {
3437 if (zone_type < ZONE_NORMAL)
3438 low_kmem_size += z->present_pages;
3439 total_size += z->present_pages;
3440 }
3441 }
3442 if (low_kmem_size &&
3443 total_size > average_size && /* ignore small node */
3444 low_kmem_size > total_size * 70/100)
3445 return ZONELIST_ORDER_NODE;
3446 }
3447 return ZONELIST_ORDER_ZONE;
3448}
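/*
 * Worked example (illustrative): on a two-node, 8 GiB machine with
 * 3 GiB of DMA32 in total, 3/8 <= 1/2 so the global check falls
 * through; but if node 0 holds 4 GiB of which those 3 GiB are DMA32,
 * its share is 75% > 70% (and 4 GiB exceeds the ~2.7 GiB average),
 * so node order is selected. A system with no DMA zone at all picks
 * node order outright.
 */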
3449
3450static void set_zonelist_order(void)
3451{
3452 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3453 current_zonelist_order = default_zonelist_order();
3454 else
3455 current_zonelist_order = user_zonelist_order;
3456}
3457
3458static void build_zonelists(pg_data_t *pgdat)
3459{
3460 int j, node, load;
3461 enum zone_type i;
1da177e4 3462 nodemask_t used_mask;
f0c0b2b8
KH
3463 int local_node, prev_node;
3464 struct zonelist *zonelist;
3465 int order = current_zonelist_order;
1da177e4
LT
3466
3467 /* initialize zonelists */
523b9458 3468 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 3469 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
3470 zonelist->_zonerefs[0].zone = NULL;
3471 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
3472 }
3473
3474 /* NUMA-aware ordering of nodes */
3475 local_node = pgdat->node_id;
62bc62a8 3476 load = nr_online_nodes;
1da177e4
LT
3477 prev_node = local_node;
3478 nodes_clear(used_mask);
f0c0b2b8 3479
f0c0b2b8
KH
3480 memset(node_order, 0, sizeof(node_order));
3481 j = 0;
3482
1da177e4
LT
3483 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3484 /*
 3485 * We don't want to pressure a particular node,
 3486 * so add a penalty to the first node in the same
 3487 * distance group to make the selection round-robin.
3488 */
957f822a
DR
3489 if (node_distance(local_node, node) !=
3490 node_distance(local_node, prev_node))
f0c0b2b8
KH
3491 node_load[node] = load;
3492
1da177e4
LT
3493 prev_node = node;
3494 load--;
f0c0b2b8
KH
3495 if (order == ZONELIST_ORDER_NODE)
3496 build_zonelists_in_node_order(pgdat, node);
3497 else
3498 node_order[j++] = node; /* remember order */
3499 }
1da177e4 3500
f0c0b2b8
KH
3501 if (order == ZONELIST_ORDER_ZONE) {
3502 /* calculate node order -- i.e., DMA last! */
3503 build_zonelists_in_zone_order(pgdat, j);
1da177e4 3504 }
523b9458
CL
3505
3506 build_thisnode_zonelists(pgdat);
1da177e4
LT
3507}
3508
9276b1bc 3509/* Construct the zonelist performance cache - see mmzone.h for further details */
f0c0b2b8 3510static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 3511{
54a6eb5c
MG
3512 struct zonelist *zonelist;
3513 struct zonelist_cache *zlc;
dd1a239f 3514 struct zoneref *z;
9276b1bc 3515
54a6eb5c
MG
3516 zonelist = &pgdat->node_zonelists[0];
3517 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3518 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
dd1a239f
MG
3519 for (z = zonelist->_zonerefs; z->zone; z++)
3520 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
9276b1bc
PJ
3521}
3522
7aac7898
LS
3523#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3524/*
3525 * Return node id of node used for "local" allocations.
3526 * I.e., first node id of first zone in arg node's generic zonelist.
3527 * Used for initializing percpu 'numa_mem', which is used primarily
3528 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3529 */
3530int local_memory_node(int node)
3531{
3532 struct zone *zone;
3533
3534 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3535 gfp_zone(GFP_KERNEL),
3536 NULL,
3537 &zone);
3538 return zone->node;
3539}
3540#endif
f0c0b2b8 3541
1da177e4
LT
3542#else /* CONFIG_NUMA */
3543
f0c0b2b8
KH
3544static void set_zonelist_order(void)
3545{
3546 current_zonelist_order = ZONELIST_ORDER_ZONE;
3547}
3548
3549static void build_zonelists(pg_data_t *pgdat)
1da177e4 3550{
19655d34 3551 int node, local_node;
54a6eb5c
MG
3552 enum zone_type j;
3553 struct zonelist *zonelist;
1da177e4
LT
3554
3555 local_node = pgdat->node_id;
1da177e4 3556
54a6eb5c
MG
3557 zonelist = &pgdat->node_zonelists[0];
3558 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
1da177e4 3559
54a6eb5c
MG
3560 /*
3561 * Now we build the zonelist so that it contains the zones
3562 * of all the other nodes.
3563 * We don't want to pressure a particular node, so when
3564 * building the zones for node N, we make sure that the
3565 * zones coming right after the local ones are those from
3566 * node N+1 (modulo N)
3567 */
3568 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3569 if (!node_online(node))
3570 continue;
3571 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3572 MAX_NR_ZONES - 1);
1da177e4 3573 }
54a6eb5c
MG
3574 for (node = 0; node < local_node; node++) {
3575 if (!node_online(node))
3576 continue;
3577 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3578 MAX_NR_ZONES - 1);
3579 }
3580
dd1a239f
MG
3581 zonelist->_zonerefs[j].zone = NULL;
3582 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
3583}
3584
9276b1bc 3585/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 3586static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 3587{
54a6eb5c 3588 pgdat->node_zonelists[0].zlcache_ptr = NULL;
9276b1bc
PJ
3589}
3590
1da177e4
LT
3591#endif /* CONFIG_NUMA */
3592
99dcc3e5
CL
3593/*
3594 * Boot pageset table. One per cpu which is going to be used for all
3595 * zones and all nodes. The parameters will be set in such a way
3596 * that an item put on a list will immediately be handed over to
3597 * the buddy list. This is safe since pageset manipulation is done
3598 * with interrupts disabled.
3599 *
3600 * The boot_pagesets must be kept even after bootup is complete for
3601 * unused processors and/or zones. They do play a role for bootstrapping
3602 * hotplugged processors.
3603 *
3604 * zoneinfo_show() and maybe other functions do
3605 * not check if the processor is online before following the pageset pointer.
3606 * Other parts of the kernel may not check if the zone is available.
3607 */
3608static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3609static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 3610static void setup_zone_pageset(struct zone *zone);
99dcc3e5 3611
4eaf3f64
HL
3612/*
3613 * Global mutex to protect against size modification of zonelists
3614 * as well as to serialize pageset setup for the new populated zone.
3615 */
3616DEFINE_MUTEX(zonelists_mutex);
3617
9b1a4d38 3618/* The return type is int just to satisfy stop_machine() */
4ed7e022 3619static int __build_all_zonelists(void *data)
1da177e4 3620{
6811378e 3621 int nid;
99dcc3e5 3622 int cpu;
9adb62a5 3623 pg_data_t *self = data;
9276b1bc 3624
7f9cfb31
BL
3625#ifdef CONFIG_NUMA
3626 memset(node_load, 0, sizeof(node_load));
3627#endif
9adb62a5
JL
3628
3629 if (self && !node_online(self->node_id)) {
3630 build_zonelists(self);
3631 build_zonelist_cache(self);
3632 }
3633
9276b1bc 3634 for_each_online_node(nid) {
7ea1530a
CL
3635 pg_data_t *pgdat = NODE_DATA(nid);
3636
3637 build_zonelists(pgdat);
3638 build_zonelist_cache(pgdat);
9276b1bc 3639 }
99dcc3e5
CL
3640
3641 /*
3642 * Initialize the boot_pagesets that are going to be used
3643 * for bootstrapping processors. The real pagesets for
3644 * each zone will be allocated later when the per cpu
3645 * allocator is available.
3646 *
3647 * boot_pagesets are used also for bootstrapping offline
3648 * cpus if the system is already booted because the pagesets
3649 * are needed to initialize allocators on a specific cpu too.
 3650 * E.g. the percpu allocator needs the page allocator, which
3651 * needs the percpu allocator in order to allocate its pagesets
3652 * (a chicken-egg dilemma).
3653 */
7aac7898 3654 for_each_possible_cpu(cpu) {
99dcc3e5
CL
3655 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3656
7aac7898
LS
3657#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3658 /*
3659 * We now know the "local memory node" for each node--
3660 * i.e., the node of the first zone in the generic zonelist.
3661 * Set up numa_mem percpu variable for on-line cpus. During
3662 * boot, only the boot cpu should be on-line; we'll init the
3663 * secondary cpus' numa_mem as they come on-line. During
3664 * node/memory hotplug, we'll fixup all on-line cpus.
3665 */
3666 if (cpu_online(cpu))
3667 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3668#endif
3669 }
3670
6811378e
YG
3671 return 0;
3672}
3673
4eaf3f64
HL
3674/*
3675 * Called with zonelists_mutex held always
3676 * unless system_state == SYSTEM_BOOTING.
3677 */
9adb62a5 3678void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
6811378e 3679{
f0c0b2b8
KH
3680 set_zonelist_order();
3681
6811378e 3682 if (system_state == SYSTEM_BOOTING) {
423b41d7 3683 __build_all_zonelists(NULL);
68ad8df4 3684 mminit_verify_zonelist();
6811378e
YG
3685 cpuset_init_current_mems_allowed();
3686 } else {
183ff22b 3687 /* we have to stop all cpus to guarantee there is no
6811378e 3688 user of the zonelists */
e9959f0f 3689#ifdef CONFIG_MEMORY_HOTPLUG
9adb62a5
JL
3690 if (zone)
3691 setup_zone_pageset(zone);
e9959f0f 3692#endif
9adb62a5 3693 stop_machine(__build_all_zonelists, pgdat, NULL);
6811378e
YG
3694 /* cpuset refresh routine should be here */
3695 }
bd1e22b8 3696 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
3697 /*
3698 * Disable grouping by mobility if the number of pages in the
3699 * system is too low to allow the mechanism to work. It would be
3700 * more accurate, but expensive to check per-zone. This check is
3701 * made on memory-hotadd so a system can start with mobility
3702 * disabled and enable it later
3703 */
d9c23400 3704 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
3705 page_group_by_mobility_disabled = 1;
3706 else
3707 page_group_by_mobility_disabled = 0;
3708
3709 printk("Built %i zonelists in %s order, mobility grouping %s. "
3710 "Total pages: %ld\n",
62bc62a8 3711 nr_online_nodes,
f0c0b2b8 3712 zonelist_order_name[current_zonelist_order],
9ef9acb0 3713 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
3714 vm_total_pages);
3715#ifdef CONFIG_NUMA
3716 printk("Policy zone: %s\n", zone_names[policy_zone]);
3717#endif
1da177e4
LT
3718}
3719
3720/*
3721 * Helper functions to size the waitqueue hash table.
3722 * Essentially these want to choose hash table sizes sufficiently
3723 * large so that collisions trying to wait on pages are rare.
3724 * But in fact, the number of active page waitqueues on typical
3725 * systems is ridiculously low, less than 200. So this is even
3726 * conservative, even though it seems large.
3727 *
3728 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3729 * waitqueues, i.e. the size of the waitq table given the number of pages.
3730 */
3731#define PAGES_PER_WAITQUEUE 256
3732
cca448fe 3733#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 3734static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
3735{
3736 unsigned long size = 1;
3737
3738 pages /= PAGES_PER_WAITQUEUE;
3739
3740 while (size < pages)
3741 size <<= 1;
3742
3743 /*
3744 * Once we have dozens or even hundreds of threads sleeping
3745 * on IO we've got bigger problems than wait queue collision.
3746 * Limit the size of the wait table to a reasonable size.
3747 */
3748 size = min(size, 4096UL);
3749
3750 return max(size, 4UL);
3751}
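/*
 * Example (illustrative, assuming 4 KiB pages): a 4 GiB zone spans
 * 1048576 pages, and 1048576 / PAGES_PER_WAITQUEUE = 4096 entries,
 * exactly the cap; a 64 MiB zone (16384 pages) gets 16384 / 256 = 64
 * entries.
 */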
cca448fe
YG
3752#else
3753/*
3754 * A zone's size might be changed by hot-add, so it is not possible to determine
3755 * a suitable size for its wait_table. So we use the maximum size now.
3756 *
3757 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
3758 *
3759 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3760 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3761 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3762 *
3763 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3764 * or more by the traditional way. (See above). It equals:
3765 *
3766 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3767 * ia64(16K page size) : = ( 8G + 4M)byte.
3768 * powerpc (64K page size) : = (32G +16M)byte.
3769 */
3770static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3771{
3772 return 4096UL;
3773}
3774#endif
1da177e4
LT
3775
3776/*
3777 * This is an integer logarithm so that shifts can be used later
3778 * to extract the more random high bits from the multiplicative
3779 * hash function before the remainder is taken.
3780 */
3781static inline unsigned long wait_table_bits(unsigned long size)
3782{
3783 return ffz(~size);
3784}
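/*
 * For the power-of-two sizes produced above, ffz(~size) is simply
 * log2(size): wait_table_bits(4096) == 12 and wait_table_bits(64) == 6.
 */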
3785
3786#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3787
6d3163ce
AH
3788/*
3789 * Check if a pageblock contains reserved pages
3790 */
3791static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3792{
3793 unsigned long pfn;
3794
3795 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3796 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3797 return 1;
3798 }
3799 return 0;
3800}
3801
56fd56b8 3802/*
d9c23400 3803 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
41858966
MG
3804 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3805 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
56fd56b8
MG
3806 * higher will lead to a bigger reserve which will get freed as contiguous
 3807 * blocks as reclaim kicks in.
3808 */
3809static void setup_zone_migrate_reserve(struct zone *zone)
3810{
6d3163ce 3811 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
56fd56b8 3812 struct page *page;
78986a67
MG
3813 unsigned long block_migratetype;
3814 int reserve;
56fd56b8 3815
d0215638
MH
3816 /*
 3817 * Get the start pfn, end pfn and the number of blocks to reserve.
3818 * We have to be careful to be aligned to pageblock_nr_pages to
3819 * make sure that we always check pfn_valid for the first page in
3820 * the block.
3821 */
56fd56b8 3822 start_pfn = zone->zone_start_pfn;
108bcc96 3823 end_pfn = zone_end_pfn(zone);
d0215638 3824 start_pfn = roundup(start_pfn, pageblock_nr_pages);
41858966 3825 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
d9c23400 3826 pageblock_order;
56fd56b8 3827
78986a67
MG
3828 /*
3829 * Reserve blocks are generally in place to help high-order atomic
3830 * allocations that are short-lived. A min_free_kbytes value that
3831 * would result in more than 2 reserve blocks for atomic allocations
3832 * is assumed to be in place to help anti-fragmentation for the
3833 * future allocation of hugepages at runtime.
3834 */
3835 reserve = min(2, reserve);
3836
d9c23400 3837 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
56fd56b8
MG
3838 if (!pfn_valid(pfn))
3839 continue;
3840 page = pfn_to_page(pfn);
3841
344c790e
AL
3842 /* Watch out for overlapping nodes */
3843 if (page_to_nid(page) != zone_to_nid(zone))
3844 continue;
3845
56fd56b8
MG
3846 block_migratetype = get_pageblock_migratetype(page);
3847
938929f1
MG
3848 /* Only test what is necessary when the reserves are not met */
3849 if (reserve > 0) {
3850 /*
 3851 * Blocks with reserved pages will never be freed, skip
3852 * them.
3853 */
3854 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3855 if (pageblock_is_reserved(pfn, block_end_pfn))
3856 continue;
56fd56b8 3857
938929f1
MG
3858 /* If this block is reserved, account for it */
3859 if (block_migratetype == MIGRATE_RESERVE) {
3860 reserve--;
3861 continue;
3862 }
3863
3864 /* Suitable for reserving if this block is movable */
3865 if (block_migratetype == MIGRATE_MOVABLE) {
3866 set_pageblock_migratetype(page,
3867 MIGRATE_RESERVE);
3868 move_freepages_block(zone, page,
3869 MIGRATE_RESERVE);
3870 reserve--;
3871 continue;
3872 }
56fd56b8
MG
3873 }
3874
3875 /*
 3876 * If the reserve is met and this is a previously reserved block,
 3877 * take it back.
3878 */
3879 if (block_migratetype == MIGRATE_RESERVE) {
3880 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3881 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3882 }
3883 }
3884}
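/*
 * Worked example (illustrative, assuming pageblock_order == 9, i.e.
 * 512-page pageblocks): min_wmark_pages(zone) == 1024 gives
 * roundup(1024, 512) >> 9 = 2 reserve blocks, while min_wmark_pages ==
 * 128 gives roundup(128, 512) >> 9 = 1. The min(2, reserve) cap keeps
 * even very large watermarks from tying up more than two pageblocks.
 */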
ac0e5b7a 3885
1da177e4
LT
3886/*
3887 * Initially all pages are reserved - free ones are freed
3888 * up by free_all_bootmem() once the early boot process is
3889 * done. Non-atomic initialization, single-pass.
3890 */
c09b4240 3891void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 3892 unsigned long start_pfn, enum memmap_context context)
1da177e4 3893{
1da177e4 3894 struct page *page;
29751f69
AW
3895 unsigned long end_pfn = start_pfn + size;
3896 unsigned long pfn;
86051ca5 3897 struct zone *z;
1da177e4 3898
22b31eec
HD
3899 if (highest_memmap_pfn < end_pfn - 1)
3900 highest_memmap_pfn = end_pfn - 1;
3901
86051ca5 3902 z = &NODE_DATA(nid)->node_zones[zone];
cbe8dd4a 3903 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
3904 /*
3905 * There can be holes in boot-time mem_map[]s
3906 * handed to this function. They do not
3907 * exist on hotplugged memory.
3908 */
3909 if (context == MEMMAP_EARLY) {
3910 if (!early_pfn_valid(pfn))
3911 continue;
3912 if (!early_pfn_in_nid(pfn, nid))
3913 continue;
3914 }
d41dee36
AW
3915 page = pfn_to_page(pfn);
3916 set_page_links(page, zone, nid, pfn);
708614e6 3917 mminit_verify_page_links(page, zone, nid, pfn);
7835e98b 3918 init_page_count(page);
22b751c3
MG
3919 page_mapcount_reset(page);
3920 page_nid_reset_last(page);
1da177e4 3921 SetPageReserved(page);
b2a0ac88
MG
3922 /*
3923 * Mark the block movable so that blocks are reserved for
3924 * movable at startup. This will force kernel allocations
3925 * to reserve their blocks rather than leaking throughout
3926 * the address space during boot when many long-lived
56fd56b8
MG
3927 * kernel allocations are made. Later some blocks near
3928 * the start are marked MIGRATE_RESERVE by
3929 * setup_zone_migrate_reserve()
86051ca5
KH
3930 *
 3931 * The bitmap is created for the zone's valid pfn range, but the
 3932 * memmap can be created for invalid pages (for alignment), so
 3933 * check here that we do not call set_pageblock_migratetype()
 3934 * against a pfn outside the zone.
b2a0ac88 3935 */
86051ca5 3936 if ((z->zone_start_pfn <= pfn)
108bcc96 3937 && (pfn < zone_end_pfn(z))
86051ca5 3938 && !(pfn & (pageblock_nr_pages - 1)))
56fd56b8 3939 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
b2a0ac88 3940
1da177e4
LT
3941 INIT_LIST_HEAD(&page->lru);
3942#ifdef WANT_PAGE_VIRTUAL
3943 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3944 if (!is_highmem_idx(zone))
3212c6be 3945 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 3946#endif
1da177e4
LT
3947 }
3948}
3949
1e548deb 3950static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 3951{
b2a0ac88
MG
3952 int order, t;
3953 for_each_migratetype_order(order, t) {
3954 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
3955 zone->free_area[order].nr_free = 0;
3956 }
3957}
3958
3959#ifndef __HAVE_ARCH_MEMMAP_INIT
3960#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 3961 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
3962#endif
3963
4ed7e022 3964static int __meminit zone_batchsize(struct zone *zone)
e7c8d5c9 3965{
3a6be87f 3966#ifdef CONFIG_MMU
e7c8d5c9
CL
3967 int batch;
3968
3969 /*
 3970 * The per-cpu-pages pools are set to around 1/1000th of the
ba56e91c 3971 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
3972 *
3973 * OK, so we don't know how big the cache is. So guess.
3974 */
b40da049 3975 batch = zone->managed_pages / 1024;
ba56e91c
SR
3976 if (batch * PAGE_SIZE > 512 * 1024)
3977 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
3978 batch /= 4; /* We effectively *= 4 below */
3979 if (batch < 1)
3980 batch = 1;
3981
3982 /*
0ceaacc9
NP
3983 * Clamp the batch to a 2^n - 1 value. Having a power
3984 * of 2 value was found to be more likely to have
3985 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 3986 *
0ceaacc9
NP
3987 * For example if 2 tasks are alternately allocating
3988 * batches of pages, one task can end up with a lot
3989 * of pages of one half of the possible page colors
3990 * and the other with pages of the other colors.
e7c8d5c9 3991 */
9155203a 3992 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 3993
e7c8d5c9 3994 return batch;
3a6be87f
DH
3995
3996#else
3997 /* The deferral and batching of frees should be suppressed under NOMMU
3998 * conditions.
3999 *
4000 * The problem is that NOMMU needs to be able to allocate large chunks
4001 * of contiguous memory as there's no hardware page translation to
4002 * assemble apparent contiguous memory from discontiguous pages.
4003 *
4004 * Queueing large contiguous runs of pages for batching, however,
4005 * causes the pages to actually be freed in smaller chunks. As there
4006 * can be a significant delay between the individual batches being
4007 * recycled, this leads to the once large chunks of space being
4008 * fragmented and becoming unavailable for high-order allocations.
4009 */
4010 return 0;
4011#endif
e7c8d5c9
CL
4012}
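/*
 * Worked example (illustrative, 4 KiB pages): managed_pages == 262144
 * (1 GiB) gives batch = 262144 / 1024 = 256; 256 pages exceed 512 KiB,
 * so batch drops to 128, and /4 leaves 32; finally
 * rounddown_pow_of_two(32 + 16) - 1 = 31.
 */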
4013
b69a7288 4014static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2caaad41
CL
4015{
4016 struct per_cpu_pages *pcp;
5f8dcc21 4017 int migratetype;
2caaad41 4018
1c6fe946
MD
4019 memset(p, 0, sizeof(*p));
4020
3dfa5721 4021 pcp = &p->pcp;
2caaad41 4022 pcp->count = 0;
2caaad41
CL
4023 pcp->high = 6 * batch;
4024 pcp->batch = max(1UL, 1 * batch);
5f8dcc21
MG
4025 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4026 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
4027}
4028
8ad4b1fb
RS
4029/*
4030 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
4031 * to the value high for the pageset p.
4032 */
4033
4034static void setup_pagelist_highmark(struct per_cpu_pageset *p,
4035 unsigned long high)
4036{
4037 struct per_cpu_pages *pcp;
4038
3dfa5721 4039 pcp = &p->pcp;
8ad4b1fb
RS
4040 pcp->high = high;
4041 pcp->batch = max(1UL, high/4);
4042 if ((high/4) > (PAGE_SHIFT * 8))
4043 pcp->batch = PAGE_SHIFT * 8;
4044}
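/*
 * Example (illustrative): setup_zone_pageset() below passes
 * high = managed_pages / percpu_pagelist_fraction; with fraction 8 and
 * 262144 managed pages, high = 32768 and high/4 = 8192 exceeds
 * PAGE_SHIFT * 8 (96 with 4 KiB pages), so the batch is clamped to 96.
 */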
4045
4ed7e022 4046static void __meminit setup_zone_pageset(struct zone *zone)
319774e2
WF
4047{
4048 int cpu;
4049
4050 zone->pageset = alloc_percpu(struct per_cpu_pageset);
4051
4052 for_each_possible_cpu(cpu) {
4053 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4054
4055 setup_pageset(pcp, zone_batchsize(zone));
4056
4057 if (percpu_pagelist_fraction)
4058 setup_pagelist_highmark(pcp,
b40da049 4059 (zone->managed_pages /
319774e2
WF
4060 percpu_pagelist_fraction));
4061 }
4062}
4063
2caaad41 4064/*
99dcc3e5
CL
4065 * Allocate per cpu pagesets and initialize them.
4066 * Before this call only boot pagesets were available.
e7c8d5c9 4067 */
99dcc3e5 4068void __init setup_per_cpu_pageset(void)
e7c8d5c9 4069{
99dcc3e5 4070 struct zone *zone;
e7c8d5c9 4071
319774e2
WF
4072 for_each_populated_zone(zone)
4073 setup_zone_pageset(zone);
e7c8d5c9
CL
4074}
4075
577a32f6 4076static noinline __init_refok
cca448fe 4077int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
4078{
4079 int i;
4080 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 4081 size_t alloc_size;
ed8ece2e
DH
4082
4083 /*
4084 * The per-page waitqueue mechanism uses hashed waitqueues
4085 * per zone.
4086 */
02b694de
YG
4087 zone->wait_table_hash_nr_entries =
4088 wait_table_hash_nr_entries(zone_size_pages);
4089 zone->wait_table_bits =
4090 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
4091 alloc_size = zone->wait_table_hash_nr_entries
4092 * sizeof(wait_queue_head_t);
4093
cd94b9db 4094 if (!slab_is_available()) {
cca448fe 4095 zone->wait_table = (wait_queue_head_t *)
8f389a99 4096 alloc_bootmem_node_nopanic(pgdat, alloc_size);
cca448fe
YG
4097 } else {
4098 /*
4099 * This case means that a zone whose size was 0 gets new memory
4100 * via memory hot-add.
4101 * But it may be the case that a new node was hot-added. In
4102 * this case vmalloc() will not be able to use this new node's
4103 * memory - this wait_table must be initialized to use this new
4104 * node itself as well.
4105 * To use this new node's memory, further consideration will be
4106 * necessary.
4107 */
8691f3a7 4108 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
4109 }
4110 if (!zone->wait_table)
4111 return -ENOMEM;
ed8ece2e 4112
02b694de 4113 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 4114 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
4115
4116 return 0;
ed8ece2e
DH
4117}
4118
c09b4240 4119static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 4120{
99dcc3e5
CL
4121 /*
4122 * per cpu subsystem is not up at this point. The following code
4123 * relies on the ability of the linker to provide the
4124 * offset of a (static) per cpu variable into the per cpu area.
4125 */
4126 zone->pageset = &boot_pageset;
ed8ece2e 4127
f5335c0f 4128 if (zone->present_pages)
99dcc3e5
CL
4129 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
4130 zone->name, zone->present_pages,
4131 zone_batchsize(zone));
ed8ece2e
DH
4132}
4133
4ed7e022 4134int __meminit init_currently_empty_zone(struct zone *zone,
718127cc 4135 unsigned long zone_start_pfn,
a2f3aa02
DH
4136 unsigned long size,
4137 enum memmap_context context)
ed8ece2e
DH
4138{
4139 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
4140 int ret;
4141 ret = zone_wait_table_init(zone, size);
4142 if (ret)
4143 return ret;
ed8ece2e
DH
4144 pgdat->nr_zones = zone_idx(zone) + 1;
4145
ed8ece2e
DH
4146 zone->zone_start_pfn = zone_start_pfn;
4147
708614e6
MG
4148 mminit_dprintk(MMINIT_TRACE, "memmap_init",
4149 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4150 pgdat->node_id,
4151 (unsigned long)zone_idx(zone),
4152 zone_start_pfn, (zone_start_pfn + size));
4153
1e548deb 4154 zone_init_free_lists(zone);
718127cc
YG
4155
4156 return 0;
ed8ece2e
DH
4157}
4158
0ee332c1 4159#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d
MG
4160#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4161/*
4162 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4163 * Architectures may implement their own version but if add_active_range()
4164 * was used and there are no special requirements, this is a convenient
 4165 * alternative.
4166 */
f2dbcfa7 4167int __meminit __early_pfn_to_nid(unsigned long pfn)
c713216d 4168{
c13291a5
TH
4169 unsigned long start_pfn, end_pfn;
4170 int i, nid;
c713216d 4171
c13291a5 4172 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
c713216d 4173 if (start_pfn <= pfn && pfn < end_pfn)
c13291a5 4174 return nid;
cc2559bc
KH
4175 /* This is a memory hole */
4176 return -1;
c713216d
MG
4177}
4178#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4179
f2dbcfa7
KH
4180int __meminit early_pfn_to_nid(unsigned long pfn)
4181{
cc2559bc
KH
4182 int nid;
4183
4184 nid = __early_pfn_to_nid(pfn);
4185 if (nid >= 0)
4186 return nid;
4187 /* just returns 0 */
4188 return 0;
f2dbcfa7
KH
4189}
4190
cc2559bc
KH
4191#ifdef CONFIG_NODES_SPAN_OTHER_NODES
4192bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4193{
4194 int nid;
4195
4196 nid = __early_pfn_to_nid(pfn);
4197 if (nid >= 0 && nid != node)
4198 return false;
4199 return true;
4200}
4201#endif
f2dbcfa7 4202
c713216d
MG
4203/**
4204 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
4205 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4206 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
4207 *
4208 * If an architecture guarantees that all ranges registered with
 4209 * add_active_ranges() contain no holes and may be freed, this
 4210 * function may be used instead of calling free_bootmem() manually.
4211 */
c13291a5 4212void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
cc289894 4213{
c13291a5
TH
4214 unsigned long start_pfn, end_pfn;
4215 int i, this_nid;
edbe7d23 4216
c13291a5
TH
4217 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4218 start_pfn = min(start_pfn, max_low_pfn);
4219 end_pfn = min(end_pfn, max_low_pfn);
edbe7d23 4220
c13291a5
TH
4221 if (start_pfn < end_pfn)
4222 free_bootmem_node(NODE_DATA(this_nid),
4223 PFN_PHYS(start_pfn),
4224 (end_pfn - start_pfn) << PAGE_SHIFT);
edbe7d23 4225 }
edbe7d23 4226}
edbe7d23 4227
c713216d
MG
4228/**
4229 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 4230 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
4231 *
4232 * If an architecture guarantees that all ranges registered with
4233 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 4234 * function may be used instead of calling memory_present() manually.
c713216d
MG
4235 */
4236void __init sparse_memory_present_with_active_regions(int nid)
4237{
c13291a5
TH
4238 unsigned long start_pfn, end_pfn;
4239 int i, this_nid;
c713216d 4240
c13291a5
TH
4241 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4242 memory_present(this_nid, start_pfn, end_pfn);
c713216d
MG
4243}
4244
4245/**
4246 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
4247 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4248 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4249 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
4250 *
4251 * It returns the start and end page frame of a node based on information
4252 * provided by an arch calling add_active_range(). If called for a node
4253 * with no available memory, a warning is printed and the start and end
88ca3b94 4254 * PFNs will be 0.
c713216d 4255 */
a3142c8e 4256void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
4257 unsigned long *start_pfn, unsigned long *end_pfn)
4258{
c13291a5 4259 unsigned long this_start_pfn, this_end_pfn;
c713216d 4260 int i;
c13291a5 4261
c713216d
MG
4262 *start_pfn = -1UL;
4263 *end_pfn = 0;
4264
c13291a5
TH
4265 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4266 *start_pfn = min(*start_pfn, this_start_pfn);
4267 *end_pfn = max(*end_pfn, this_end_pfn);
c713216d
MG
4268 }
4269
633c0666 4270 if (*start_pfn == -1UL)
c713216d 4271 *start_pfn = 0;
c713216d
MG
4272}
4273
2a1e274a
MG
4274/*
4275 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 4276 * assumption is made that zones within a node are ordered by monotonically
 4277 * increasing memory addresses, so that the "highest" populated zone is used.
4278 */
b69a7288 4279static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
4280{
4281 int zone_index;
4282 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4283 if (zone_index == ZONE_MOVABLE)
4284 continue;
4285
4286 if (arch_zone_highest_possible_pfn[zone_index] >
4287 arch_zone_lowest_possible_pfn[zone_index])
4288 break;
4289 }
4290
4291 VM_BUG_ON(zone_index == -1);
4292 movable_zone = zone_index;
4293}
4294
4295/*
4296 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
25985edc 4297 * because it is sized independently of architecture. Unlike the other zones,
2a1e274a
MG
4298 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4299 * in each node depending on the size of each node and how evenly kernelcore
4300 * is distributed. This helper function adjusts the zone ranges
4301 * provided by the architecture for a given node by using the end of the
4302 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 4303 * zones within a node are ordered by monotonically increasing memory addresses.
4304 */
b69a7288 4305static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
4306 unsigned long zone_type,
4307 unsigned long node_start_pfn,
4308 unsigned long node_end_pfn,
4309 unsigned long *zone_start_pfn,
4310 unsigned long *zone_end_pfn)
4311{
4312 /* Only adjust if ZONE_MOVABLE is on this node */
4313 if (zone_movable_pfn[nid]) {
4314 /* Size ZONE_MOVABLE */
4315 if (zone_type == ZONE_MOVABLE) {
4316 *zone_start_pfn = zone_movable_pfn[nid];
4317 *zone_end_pfn = min(node_end_pfn,
4318 arch_zone_highest_possible_pfn[movable_zone]);
4319
4320 /* Adjust for ZONE_MOVABLE starting within this range */
4321 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4322 *zone_end_pfn > zone_movable_pfn[nid]) {
4323 *zone_end_pfn = zone_movable_pfn[nid];
4324
4325 /* Check if this whole range is within ZONE_MOVABLE */
4326 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4327 *zone_start_pfn = *zone_end_pfn;
4328 }
4329}
4330
c713216d
MG
4331/*
 4332 * Return the number of pages a zone spans in a node, including holes:
4333 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4334 */
6ea6e688 4335static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
4336 unsigned long zone_type,
4337 unsigned long *ignored)
4338{
4339 unsigned long node_start_pfn, node_end_pfn;
4340 unsigned long zone_start_pfn, zone_end_pfn;
4341
4342 /* Get the start and end of the node and zone */
4343 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4344 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4345 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
4346 adjust_zone_range_for_zone_movable(nid, zone_type,
4347 node_start_pfn, node_end_pfn,
4348 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
4349
4350 /* Check that this node has pages within the zone's required range */
4351 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4352 return 0;
4353
4354 /* Move the zone boundaries inside the node if necessary */
4355 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4356 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4357
4358 /* Return the spanned pages */
4359 return zone_end_pfn - zone_start_pfn;
4360}
4361
4362/*
4363 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 4364 * then all holes in the requested range will be accounted for.
c713216d 4365 */
32996250 4366unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
4367 unsigned long range_start_pfn,
4368 unsigned long range_end_pfn)
4369{
96e907d1
TH
4370 unsigned long nr_absent = range_end_pfn - range_start_pfn;
4371 unsigned long start_pfn, end_pfn;
4372 int i;
c713216d 4373
96e907d1
TH
4374 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4375 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4376 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4377 nr_absent -= end_pfn - start_pfn;
c713216d 4378 }
96e907d1 4379 return nr_absent;
c713216d
MG
4380}
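/*
 * Example (illustrative): for the range [1000, 5000) with registered
 * memory at [0, 2000) and [3000, 4000), nr_absent starts at 4000 and
 * the clamped intersections [1000, 2000) and [3000, 4000) remove 1000
 * pages each, leaving 2000 absent pages.
 */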
4381
4382/**
4383 * absent_pages_in_range - Return number of page frames in holes within a range
4384 * @start_pfn: The start PFN to start searching for holes
4385 * @end_pfn: The end PFN to stop searching for holes
4386 *
88ca3b94 4387 * It returns the number of page frames in memory holes within a range.
c713216d
MG
4388 */
4389unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4390 unsigned long end_pfn)
4391{
4392 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4393}
4394
4395/* Return the number of page frames in holes in a zone on a node */
6ea6e688 4396static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
4397 unsigned long zone_type,
4398 unsigned long *ignored)
4399{
96e907d1
TH
4400 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4401 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
9c7cd687
MG
4402 unsigned long node_start_pfn, node_end_pfn;
4403 unsigned long zone_start_pfn, zone_end_pfn;
4404
4405 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
96e907d1
TH
4406 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4407 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
9c7cd687 4408
2a1e274a
MG
4409 adjust_zone_range_for_zone_movable(nid, zone_type,
4410 node_start_pfn, node_end_pfn,
4411 &zone_start_pfn, &zone_end_pfn);
9c7cd687 4412 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 4413}
0e0b864e 4414
6981ec31
TC
4415/**
4416 * sanitize_zone_movable_limit - Sanitize the zone_movable_limit array.
4417 *
 4418 * zone_movable_limit is initialized to 0. This function will try to get
 4419 * the first ZONE_MOVABLE pfn of each node from movablemem_map, and
 4420 * assign them to zone_movable_limit.
4421 * zone_movable_limit[nid] == 0 means no limit for the node.
4422 *
4423 * Note: Each range is represented as [start_pfn, end_pfn)
4424 */
4425static void __meminit sanitize_zone_movable_limit(void)
4426{
4427 int map_pos = 0, i, nid;
4428 unsigned long start_pfn, end_pfn;
4429
4430 if (!movablemem_map.nr_map)
4431 return;
4432
4433 /* Iterate all ranges from minimum to maximum */
4434 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4435 /*
 4436 * If we have already found the lowest pfn of ZONE_MOVABLE for the
 4437 * node specified by the user, just go on to check the next range.
4438 */
4439 if (zone_movable_limit[nid])
4440 continue;
4441
4442#ifdef CONFIG_ZONE_DMA
4443 /* Skip DMA memory. */
4444 if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA])
4445 start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA];
4446#endif
4447
4448#ifdef CONFIG_ZONE_DMA32
4449 /* Skip DMA32 memory. */
4450 if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA32])
4451 start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA32];
4452#endif
4453
4454#ifdef CONFIG_HIGHMEM
4455 /* Skip lowmem if ZONE_MOVABLE is highmem. */
4456 if (zone_movable_is_highmem() &&
4457 start_pfn < arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])
4458 start_pfn = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
4459#endif
4460
4461 if (start_pfn >= end_pfn)
4462 continue;
4463
4464 while (map_pos < movablemem_map.nr_map) {
4465 if (end_pfn <= movablemem_map.map[map_pos].start_pfn)
4466 break;
4467
4468 if (start_pfn >= movablemem_map.map[map_pos].end_pfn) {
4469 map_pos++;
4470 continue;
4471 }
4472
4473 /*
4474 * The start_pfn of ZONE_MOVABLE is either the minimum
4475 * pfn specified by movablemem_map, or 0, which means
4476 * the node has no ZONE_MOVABLE.
4477 */
4478 zone_movable_limit[nid] = max(start_pfn,
4479 movablemem_map.map[map_pos].start_pfn);
4480
4481 break;
4482 }
4483 }
4484}
4485
0ee332c1 4486#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6ea6e688 4487static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
4488 unsigned long zone_type,
4489 unsigned long *zones_size)
4490{
4491 return zones_size[zone_type];
4492}
4493
6ea6e688 4494static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
4495 unsigned long zone_type,
4496 unsigned long *zholes_size)
4497{
4498 if (!zholes_size)
4499 return 0;
4500
4501 return zholes_size[zone_type];
4502}
0ee332c1 4503#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 4504
a3142c8e 4505static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
4506 unsigned long *zones_size, unsigned long *zholes_size)
4507{
4508 unsigned long realtotalpages, totalpages = 0;
4509 enum zone_type i;
4510
4511 for (i = 0; i < MAX_NR_ZONES; i++)
4512 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4513 zones_size);
4514 pgdat->node_spanned_pages = totalpages;
4515
4516 realtotalpages = totalpages;
4517 for (i = 0; i < MAX_NR_ZONES; i++)
4518 realtotalpages -=
4519 zone_absent_pages_in_node(pgdat->node_id, i,
4520 zholes_size);
4521 pgdat->node_present_pages = realtotalpages;
4522 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4523 realtotalpages);
4524}
4525
835c134e
MG
4526#ifndef CONFIG_SPARSEMEM
4527/*
 4528 * Calculate the size of the zone->blockflags rounded to an unsigned long.
d9c23400
MG
 4529 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 4530 * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, and finally
835c134e
MG
 4531 * round what is now in bits up to the nearest long, then return it in
 4532 * bytes.
4533 */
7c45512d 4534static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
835c134e
MG
4535{
4536 unsigned long usemapsize;
4537
7c45512d 4538 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
d9c23400
MG
4539 usemapsize = roundup(zonesize, pageblock_nr_pages);
4540 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
4541 usemapsize *= NR_PAGEBLOCK_BITS;
4542 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4543
4544 return usemapsize / 8;
4545}
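/*
 * Worked example (illustrative, assuming pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): a pageblock-aligned 1 GiB zone of 262144
 * 4 KiB pages spans 512 pageblocks and needs 512 * 4 = 2048 bits;
 * that is already a whole number of longs, so usemap_size() returns
 * 2048 / 8 = 256 bytes.
 */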
4546
4547static void __init setup_usemap(struct pglist_data *pgdat,
7c45512d
LT
4548 struct zone *zone,
4549 unsigned long zone_start_pfn,
4550 unsigned long zonesize)
835c134e 4551{
7c45512d 4552 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
835c134e 4553 zone->pageblock_flags = NULL;
58a01a45 4554 if (usemapsize)
8f389a99
YL
4555 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4556 usemapsize);
835c134e
MG
4557}
4558#else
7c45512d
LT
4559static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
4560 unsigned long zone_start_pfn, unsigned long zonesize) {}
835c134e
MG
4561#endif /* CONFIG_SPARSEMEM */
4562
d9c23400 4563#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c 4564
d9c23400 4565/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
ca57df79 4566void __init set_pageblock_order(void)
d9c23400 4567{
955c1cd7
AM
4568 unsigned int order;
4569
d9c23400
MG
4570 /* Check that pageblock_nr_pages has not already been setup */
4571 if (pageblock_order)
4572 return;
4573
955c1cd7
AM
4574 if (HPAGE_SHIFT > PAGE_SHIFT)
4575 order = HUGETLB_PAGE_ORDER;
4576 else
4577 order = MAX_ORDER - 1;
4578
d9c23400
MG
4579 /*
4580 * Assume the largest contiguous order of interest is a huge page.
955c1cd7
AM
4581 * This value may be variable depending on boot parameters on IA64 and
4582 * powerpc.
d9c23400
MG
4583 */
4584 pageblock_order = order;
4585}
4586#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4587
ba72cb8c
MG
4588/*
4589 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
955c1cd7
AM
4590 * is unused as pageblock_order is set at compile-time. See
4591 * include/linux/pageblock-flags.h for the values of pageblock_order based on
4592 * the kernel config
ba72cb8c 4593 */
ca57df79 4594void __init set_pageblock_order(void)
ba72cb8c 4595{
ba72cb8c 4596}
d9c23400
MG
4597
4598#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4599
01cefaef
JL
4600static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
4601 unsigned long present_pages)
4602{
4603 unsigned long pages = spanned_pages;
4604
4605 /*
4606 * Provide a more accurate estimation if there are holes within
4607 * the zone and SPARSEMEM is in use. If there are holes within the
4608 * zone, each populated memory region may cost us one or two extra
 4609 * memmap pages due to alignment because the memmap pages for each
 4610 * populated region may not be naturally aligned on a page boundary.
4611 * So the (present_pages >> 4) heuristic is a tradeoff for that.
4612 */
4613 if (spanned_pages > present_pages + (present_pages >> 4) &&
4614 IS_ENABLED(CONFIG_SPARSEMEM))
4615 pages = present_pages;
4616
4617 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
4618}
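/*
 * Worked example (illustrative, assuming sizeof(struct page) == 64 and
 * 4 KiB pages): 262144 spanned pages need 262144 * 64 bytes of memmap,
 * i.e. 16 MiB or 4096 pages. With SPARSEMEM and only 200000 present
 * pages, 262144 > 200000 + (200000 >> 4) holds, so present_pages is
 * used for the estimate instead.
 */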
4619
1da177e4
LT
4620/*
4621 * Set up the zone data structures:
4622 * - mark all pages reserved
4623 * - mark all memory queues empty
4624 * - clear the memory bitmaps
6527af5d
MK
4625 *
4626 * NOTE: pgdat should get zeroed by caller.
1da177e4 4627 */
b5a0e011 4628static void __paginginit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
4629 unsigned long *zones_size, unsigned long *zholes_size)
4630{
2f1b6248 4631 enum zone_type j;
ed8ece2e 4632 int nid = pgdat->node_id;
1da177e4 4633 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 4634 int ret;
1da177e4 4635
208d54e5 4636 pgdat_resize_init(pgdat);
8177a420
AA
4637#ifdef CONFIG_NUMA_BALANCING
4638 spin_lock_init(&pgdat->numabalancing_migrate_lock);
4639 pgdat->numabalancing_migrate_nr_pages = 0;
4640 pgdat->numabalancing_migrate_next_window = jiffies;
4641#endif
1da177e4 4642 init_waitqueue_head(&pgdat->kswapd_wait);
5515061d 4643 init_waitqueue_head(&pgdat->pfmemalloc_wait);
52d4b9ac 4644 pgdat_page_cgroup_init(pgdat);
5f63b720 4645
1da177e4
LT
4646 for (j = 0; j < MAX_NR_ZONES; j++) {
4647 struct zone *zone = pgdat->node_zones + j;
9feedc9d 4648 unsigned long size, realsize, freesize, memmap_pages;
1da177e4 4649
c713216d 4650 size = zone_spanned_pages_in_node(nid, j, zones_size);
9feedc9d 4651 realsize = freesize = size - zone_absent_pages_in_node(nid, j,
c713216d 4652 zholes_size);
1da177e4 4653
0e0b864e 4654 /*
9feedc9d 4655 * Adjust freesize so that it accounts for how much memory
0e0b864e
MG
4656 * is used by this zone for memmap. This affects the watermark
4657 * and per-cpu initialisations
4658 */
01cefaef 4659 memmap_pages = calc_memmap_size(size, realsize);
9feedc9d
JL
4660 if (freesize >= memmap_pages) {
4661 freesize -= memmap_pages;
5594c8c8
YL
4662 if (memmap_pages)
4663 printk(KERN_DEBUG
4664 " %s zone: %lu pages used for memmap\n",
4665 zone_names[j], memmap_pages);
0e0b864e
MG
4666 } else
4667 printk(KERN_WARNING
9feedc9d
JL
4668 " %s zone: %lu pages exceeds freesize %lu\n",
4669 zone_names[j], memmap_pages, freesize);
0e0b864e 4670
6267276f 4671 /* Account for reserved pages */
9feedc9d
JL
4672 if (j == 0 && freesize > dma_reserve) {
4673 freesize -= dma_reserve;
d903ef9f 4674 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 4675 zone_names[0], dma_reserve);
0e0b864e
MG
4676 }
4677
98d2b0eb 4678 if (!is_highmem_idx(j))
9feedc9d 4679 nr_kernel_pages += freesize;
01cefaef
JL
4680 /* Charge for highmem memmap if there are enough kernel pages */
4681 else if (nr_kernel_pages > memmap_pages * 2)
4682 nr_kernel_pages -= memmap_pages;
9feedc9d 4683 nr_all_pages += freesize;
1da177e4
LT
4684
4685 zone->spanned_pages = size;
306f2e9e 4686 zone->present_pages = realsize;
9feedc9d
JL
4687 /*
 4688 * Set an approximate value for lowmem here; it will be adjusted
4689 * when the bootmem allocator frees pages into the buddy system.
4690 * And all highmem pages will be managed by the buddy system.
4691 */
4692 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
9614634f 4693#ifdef CONFIG_NUMA
d5f541ed 4694 zone->node = nid;
9feedc9d 4695 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
9614634f 4696 / 100;
9feedc9d 4697 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
9614634f 4698#endif
1da177e4
LT
4699 zone->name = zone_names[j];
4700 spin_lock_init(&zone->lock);
4701 spin_lock_init(&zone->lru_lock);
bdc8cb98 4702 zone_seqlock_init(zone);
1da177e4 4703 zone->zone_pgdat = pgdat;
1da177e4 4704
ed8ece2e 4705 zone_pcp_init(zone);
bea8c150 4706 lruvec_init(&zone->lruvec);
1da177e4
LT
4707 if (!size)
4708 continue;
4709
955c1cd7 4710 set_pageblock_order();
7c45512d 4711 setup_usemap(pgdat, zone, zone_start_pfn, size);
a2f3aa02
DH
4712 ret = init_currently_empty_zone(zone, zone_start_pfn,
4713 size, MEMMAP_EARLY);
718127cc 4714 BUG_ON(ret);
76cdd58e 4715 memmap_init(size, nid, j, zone_start_pfn);
1da177e4 4716 zone_start_pfn += size;
1da177e4
LT
4717 }
4718}
4719
577a32f6 4720static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 4721{
1da177e4
LT
4722 /* Skip empty nodes */
4723 if (!pgdat->node_spanned_pages)
4724 return;
4725
d41dee36 4726#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
4727 /* ia64 gets its own node_mem_map, before this, without bootmem */
4728 if (!pgdat->node_mem_map) {
e984bb43 4729 unsigned long size, start, end;
d41dee36
AW
4730 struct page *map;
4731
e984bb43
BP
4732 /*
4733 * The zone's endpoints aren't required to be MAX_ORDER
4734 * aligned but the node_mem_map endpoints must be in order
4735 * for the buddy allocator to function correctly.
4736 */
4737 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
108bcc96 4738 end = pgdat_end_pfn(pgdat);
e984bb43
BP
4739 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4740 size = (end - start) * sizeof(struct page);
6f167ec7
DH
4741 map = alloc_remap(pgdat->node_id, size);
4742 if (!map)
8f389a99 4743 map = alloc_bootmem_node_nopanic(pgdat, size);
e984bb43 4744 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 4745 }
12d810c1 4746#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
4747 /*
4748 * With no DISCONTIG, the global mem_map is just set as node 0's
4749 */
c713216d 4750 if (pgdat == NODE_DATA(0)) {
1da177e4 4751 mem_map = NODE_DATA(0)->node_mem_map;
0ee332c1 4752#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d 4753 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
467bc461 4754 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
0ee332c1 4755#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 4756 }
1da177e4 4757#endif
d41dee36 4758#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
4759}
4760
9109fb7b
JW
4761void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4762 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 4763{
9109fb7b
JW
4764 pg_data_t *pgdat = NODE_DATA(nid);
4765
88fdf75d 4766 /* pg_data_t should be reset to zero when it's allocated */
8783b6e2 4767 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
88fdf75d 4768
1da177e4
LT
4769 pgdat->node_id = nid;
4770 pgdat->node_start_pfn = node_start_pfn;
957f822a 4771 init_zone_allows_reclaim(nid);
c713216d 4772 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
4773
4774 alloc_node_mem_map(pgdat);
e8c27ac9
YL
4775#ifdef CONFIG_FLAT_NODE_MEM_MAP
4776 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4777 nid, (unsigned long)pgdat,
4778 (unsigned long)pgdat->node_mem_map);
4779#endif
1da177e4
LT
4780
4781 free_area_init_core(pgdat, zones_size, zholes_size);
4782}
4783
0ee332c1 4784#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
418508c1
MS
4785
4786#if MAX_NUMNODES > 1
4787/*
4788 * Figure out the number of possible node ids.
4789 */
4790static void __init setup_nr_node_ids(void)
4791{
4792 unsigned int node;
4793 unsigned int highest = 0;
4794
4795 for_each_node_mask(node, node_possible_map)
4796 highest = node;
4797 nr_node_ids = highest + 1;
4798}
4799#else
4800static inline void setup_nr_node_ids(void)
4801{
4802}
4803#endif
4804
1e01979c
TH
4805/**
4806 * node_map_pfn_alignment - determine the maximum internode alignment
4807 *
4808 * This function should be called after node map is populated and sorted.
4809 * It calculates the maximum power of two alignment which can distinguish
4810 * all the nodes.
4811 *
4812 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4813 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
4814 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
4815 * shifted, 1GiB is enough and this function will indicate so.
4816 *
4817 * This is used to test whether pfn -> nid mapping of the chosen memory
4818 * model has fine enough granularity to avoid incorrect mapping for the
4819 * populated node map.
4820 *
4821 * Returns the determined alignment in pfn's. 0 if there is no alignment
4822 * requirement (single node).
4823 */
4824unsigned long __init node_map_pfn_alignment(void)
4825{
4826 unsigned long accl_mask = 0, last_end = 0;
c13291a5 4827 unsigned long start, end, mask;
1e01979c 4828 int last_nid = -1;
c13291a5 4829 int i, nid;
1e01979c 4830
c13291a5 4831 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1e01979c
TH
4832 if (!start || last_nid < 0 || last_nid == nid) {
4833 last_nid = nid;
4834 last_end = end;
4835 continue;
4836 }
4837
4838 /*
4839 * Start with a mask granular enough to pin-point to the
4840 * start pfn and tick off bits one-by-one until it becomes
4841 * too coarse to separate the current node from the last.
4842 */
4843 mask = ~((1 << __ffs(start)) - 1);
4844 while (mask && last_end <= (start & (mask << 1)))
4845 mask <<= 1;
4846
4847 /* accumulate all internode masks */
4848 accl_mask |= mask;
4849 }
4850
4851 /* convert mask to number of pages */
4852 return ~accl_mask + 1;
4853}
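/*
 * Illustrative userspace sketch (not part of this file) of the mask
 * accumulation above, assuming 4KiB pages and two 256MiB nodes;
 * __builtin_ctzl stands in for the kernel's __ffs().
 */
#include <stdio.h>

int main(void)
{
        /* node 0 owns pfns [0, 0x10000), node 1 owns [0x10000, 0x20000) */
        unsigned long last_end = 0x10000, start = 0x10000;
        unsigned long accl_mask = 0, mask;

        /* pin-point the start pfn, then coarsen until too coarse to split */
        mask = ~((1UL << __builtin_ctzl(start)) - 1);
        while (mask && last_end <= (start & (mask << 1)))
                mask <<= 1;
        accl_mask |= mask;

        /* prints 0x10000 pfns, i.e. 256MiB alignment at 4KiB pages */
        printf("alignment: %#lx pfns\n", ~accl_mask + 1);
        return 0;
}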
4854
a6af2bc3 4855/* Find the lowest pfn for a node */
b69a7288 4856static unsigned long __init find_min_pfn_for_node(int nid)
c713216d 4857{
a6af2bc3 4858 unsigned long min_pfn = ULONG_MAX;
c13291a5
TH
4859 unsigned long start_pfn;
4860 int i;
1abbfb41 4861
c13291a5
TH
4862 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
4863 min_pfn = min(min_pfn, start_pfn);
c713216d 4864
a6af2bc3
MG
4865 if (min_pfn == ULONG_MAX) {
4866 printk(KERN_WARNING
2bc0d261 4867 "Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
4868 return 0;
4869 }
4870
4871 return min_pfn;
c713216d
MG
4872}
4873
4874/**
4875 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4876 *
4877 * It returns the minimum PFN based on information provided via
88ca3b94 4878 * add_active_range().
c713216d
MG
4879 */
4880unsigned long __init find_min_pfn_with_active_regions(void)
4881{
4882 return find_min_pfn_for_node(MAX_NUMNODES);
4883}
4884
37b07e41
LS
4885/*
4886 * early_calculate_totalpages()
4887 * Sum pages in active regions for movable zone.
4b0ef1fe 4888 * Populate N_MEMORY for calculating usable_nodes.
37b07e41 4889 */
484f51f8 4890static unsigned long __init early_calculate_totalpages(void)
7e63efef 4891{
7e63efef 4892 unsigned long totalpages = 0;
c13291a5
TH
4893 unsigned long start_pfn, end_pfn;
4894 int i, nid;
4895
4896 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4897 unsigned long pages = end_pfn - start_pfn;
7e63efef 4898
37b07e41
LS
4899 totalpages += pages;
4900 if (pages)
4b0ef1fe 4901 node_set_state(nid, N_MEMORY);
37b07e41
LS
4902 }
4903 return totalpages;
7e63efef
MG
4904}
4905
2a1e274a
MG
4906/*
 4907 * Find the PFN at which the Movable zone begins in each node. Kernel
 4908 * memory is spread evenly between nodes as long as the nodes have
 4909 * enough memory. When they don't, some nodes will have more kernelcore
 4910 * than others.
4911 */
b224ef85 4912static void __init find_zone_movable_pfns_for_nodes(void)
2a1e274a
MG
4913{
4914 int i, nid;
4915 unsigned long usable_startpfn;
4916 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd 4917 /* save the state before borrow the nodemask */
4b0ef1fe 4918 nodemask_t saved_node_state = node_states[N_MEMORY];
37b07e41 4919 unsigned long totalpages = early_calculate_totalpages();
4b0ef1fe 4920 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
2a1e274a 4921
7e63efef
MG
4922 /*
4923 * If movablecore was specified, calculate what size of
 4924 * kernelcore it corresponds to so that memory usable for
4925 * any allocation type is evenly spread. If both kernelcore
4926 * and movablecore are specified, then the value of kernelcore
4927 * will be used for required_kernelcore if it's greater than
4928 * what movablecore would have allowed.
4929 */
4930 if (required_movablecore) {
7e63efef
MG
4931 unsigned long corepages;
4932
4933 /*
 4934 * Round up so that ZONE_MOVABLE is at least as large as what
4935 * was requested by the user
4936 */
4937 required_movablecore =
4938 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4939 corepages = totalpages - required_movablecore;
4940
4941 required_kernelcore = max(required_kernelcore, corepages);
4942 }
4943
42f47e27
TC
4944 /*
4945 * If neither kernelcore/movablecore nor movablemem_map is specified,
4946 * there is no ZONE_MOVABLE. But if movablemem_map is specified, the
4947 * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
4948 */
4949 if (!required_kernelcore) {
4950 if (movablemem_map.nr_map)
4951 memcpy(zone_movable_pfn, zone_movable_limit,
4952 sizeof(zone_movable_pfn));
66918dcd 4953 goto out;
42f47e27 4954 }
2a1e274a
MG
4955
4956 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
2a1e274a
MG
4957 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4958
4959restart:
4960 /* Spread kernelcore memory as evenly as possible throughout nodes */
4961 kernelcore_node = required_kernelcore / usable_nodes;
4b0ef1fe 4962 for_each_node_state(nid, N_MEMORY) {
c13291a5
TH
4963 unsigned long start_pfn, end_pfn;
4964
2a1e274a
MG
4965 /*
4966 * Recalculate kernelcore_node if the division per node
4967 * now exceeds what is necessary to satisfy the requested
4968 * amount of memory for the kernel
4969 */
4970 if (required_kernelcore < kernelcore_node)
4971 kernelcore_node = required_kernelcore / usable_nodes;
4972
4973 /*
4974 * As the map is walked, we track how much memory is usable
4975 * by the kernel using kernelcore_remaining. When it is
4976 * 0, the rest of the node is usable by ZONE_MOVABLE
4977 */
4978 kernelcore_remaining = kernelcore_node;
4979
4980 /* Go through each range of PFNs within this node */
c13291a5 4981 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2a1e274a
MG
4982 unsigned long size_pages;
4983
42f47e27
TC
4984 /*
4985 * Find more memory for kernelcore in
4986 * [zone_movable_pfn[nid], zone_movable_limit[nid]).
4987 */
c13291a5 4988 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
2a1e274a
MG
4989 if (start_pfn >= end_pfn)
4990 continue;
4991
42f47e27
TC
4992 if (zone_movable_limit[nid]) {
4993 end_pfn = min(end_pfn, zone_movable_limit[nid]);
4994 /* No range left for kernelcore in this node */
4995 if (start_pfn >= end_pfn) {
4996 zone_movable_pfn[nid] =
4997 zone_movable_limit[nid];
4998 break;
4999 }
5000 }
5001
2a1e274a
MG
5002 /* Account for what is only usable for kernelcore */
5003 if (start_pfn < usable_startpfn) {
5004 unsigned long kernel_pages;
5005 kernel_pages = min(end_pfn, usable_startpfn)
5006 - start_pfn;
5007
5008 kernelcore_remaining -= min(kernel_pages,
5009 kernelcore_remaining);
5010 required_kernelcore -= min(kernel_pages,
5011 required_kernelcore);
5012
5013 /* Continue if range is now fully accounted */
5014 if (end_pfn <= usable_startpfn) {
5015
5016 /*
5017 * Push zone_movable_pfn to the end so
5018 * that if we have to rebalance
5019 * kernelcore across nodes, we will
5020 * not double account here
5021 */
5022 zone_movable_pfn[nid] = end_pfn;
5023 continue;
5024 }
5025 start_pfn = usable_startpfn;
5026 }
5027
5028 /*
5029 * The usable PFN range for ZONE_MOVABLE is from
5030 * start_pfn->end_pfn. Calculate size_pages as the
5031 * number of pages used as kernelcore
5032 */
5033 size_pages = end_pfn - start_pfn;
5034 if (size_pages > kernelcore_remaining)
5035 size_pages = kernelcore_remaining;
5036 zone_movable_pfn[nid] = start_pfn + size_pages;
5037
5038 /*
 5039 * Some of the kernelcore requirement has been met; update
 5040 * the counts and break if the kernelcore for this node has
 5041 * been satisfied
5042 */
5043 required_kernelcore -= min(required_kernelcore,
5044 size_pages);
5045 kernelcore_remaining -= size_pages;
5046 if (!kernelcore_remaining)
5047 break;
5048 }
5049 }
5050
5051 /*
5052 * If there is still required_kernelcore, we do another pass with one
5053 * less node in the count. This will push zone_movable_pfn[nid] further
5054 * along on the nodes that still have memory until kernelcore is
 5055 * satisfied
5056 */
5057 usable_nodes--;
5058 if (usable_nodes && required_kernelcore > usable_nodes)
5059 goto restart;
5060
42f47e27 5061out:
2a1e274a
MG
5062 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5063 for (nid = 0; nid < MAX_NUMNODES; nid++)
5064 zone_movable_pfn[nid] =
5065 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd 5066
66918dcd 5067 /* restore the node_state */
4b0ef1fe 5068 node_states[N_MEMORY] = saved_node_state;
2a1e274a
MG
5069}
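/*
 * Worked example (illustrative): with two 4GiB nodes and kernelcore=2G,
 * kernelcore_node is 1GiB per node, so zone_movable_pfn[] lands 1GiB into
 * each node and the remaining 3GiB per node becomes ZONE_MOVABLE. If one
 * node cannot supply its share, the restart pass spreads the shortfall
 * over the nodes that still have memory.
 */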
5070
4b0ef1fe
LJ
5071/* Any regular or high memory on that node ? */
5072static void check_for_memory(pg_data_t *pgdat, int nid)
37b07e41 5073{
37b07e41
LS
5074 enum zone_type zone_type;
5075
4b0ef1fe
LJ
5076 if (N_MEMORY == N_NORMAL_MEMORY)
5077 return;
5078
5079 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
37b07e41 5080 struct zone *zone = &pgdat->node_zones[zone_type];
d0048b0e 5081 if (zone->present_pages) {
4b0ef1fe
LJ
5082 node_set_state(nid, N_HIGH_MEMORY);
5083 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5084 zone_type <= ZONE_NORMAL)
5085 node_set_state(nid, N_NORMAL_MEMORY);
d0048b0e
BL
5086 break;
5087 }
37b07e41 5088 }
37b07e41
LS
5089}
5090
c713216d
MG
5091/**
5092 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 5093 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
5094 *
5095 * This will call free_area_init_node() for each active node in the system.
5096 * Using the page ranges provided by add_active_range(), the size of each
 5097 * zone in each node and their holes is calculated. If the maximum PFNs
 5098 * of two adjacent zones match, it is assumed that the zone is empty.
5099 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5100 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5101 * starts where the previous one ended. For example, ZONE_DMA32 starts
5102 * at arch_max_dma_pfn.
5103 */
5104void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5105{
c13291a5
TH
5106 unsigned long start_pfn, end_pfn;
5107 int i, nid;
a6af2bc3 5108
c713216d
MG
5109 /* Record where the zone boundaries are */
5110 memset(arch_zone_lowest_possible_pfn, 0,
5111 sizeof(arch_zone_lowest_possible_pfn));
5112 memset(arch_zone_highest_possible_pfn, 0,
5113 sizeof(arch_zone_highest_possible_pfn));
5114 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5115 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5116 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
5117 if (i == ZONE_MOVABLE)
5118 continue;
c713216d
MG
5119 arch_zone_lowest_possible_pfn[i] =
5120 arch_zone_highest_possible_pfn[i-1];
5121 arch_zone_highest_possible_pfn[i] =
5122 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5123 }
2a1e274a
MG
5124 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5125 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5126
5127 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
5128 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
6981ec31
TC
5129 find_usable_zone_for_movable();
5130 sanitize_zone_movable_limit();
b224ef85 5131 find_zone_movable_pfns_for_nodes();
c713216d 5132
c713216d 5133 /* Print out the zone ranges */
a62e2f4f 5134 printk("Zone ranges:\n");
2a1e274a
MG
5135 for (i = 0; i < MAX_NR_ZONES; i++) {
5136 if (i == ZONE_MOVABLE)
5137 continue;
155cbfc8 5138 printk(KERN_CONT " %-8s ", zone_names[i]);
72f0ba02
DR
5139 if (arch_zone_lowest_possible_pfn[i] ==
5140 arch_zone_highest_possible_pfn[i])
155cbfc8 5141 printk(KERN_CONT "empty\n");
72f0ba02 5142 else
a62e2f4f
BH
5143 printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
5144 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
5145 (arch_zone_highest_possible_pfn[i]
5146 << PAGE_SHIFT) - 1);
2a1e274a
MG
5147 }
5148
5149 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
a62e2f4f 5150 printk("Movable zone start for each node\n");
2a1e274a
MG
5151 for (i = 0; i < MAX_NUMNODES; i++) {
5152 if (zone_movable_pfn[i])
a62e2f4f
BH
5153 printk(" Node %d: %#010lx\n", i,
5154 zone_movable_pfn[i] << PAGE_SHIFT);
2a1e274a 5155 }
c713216d 5156
f2d52fe5 5157 /* Print out the early node map */
a62e2f4f 5158 printk("Early memory node ranges\n");
c13291a5 5159 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
a62e2f4f
BH
5160 printk(" node %3d: [mem %#010lx-%#010lx]\n", nid,
5161 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
c713216d
MG
5162
5163 /* Initialise every node */
708614e6 5164 mminit_verify_pageflags_layout();
8ef82866 5165 setup_nr_node_ids();
c713216d
MG
5166 for_each_online_node(nid) {
5167 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 5168 free_area_init_node(nid, NULL,
c713216d 5169 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
5170
5171 /* Any memory on that node */
5172 if (pgdat->node_present_pages)
4b0ef1fe
LJ
5173 node_set_state(nid, N_MEMORY);
5174 check_for_memory(pgdat, nid);
c713216d
MG
5175 }
5176}
2a1e274a 5177
7e63efef 5178static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
5179{
5180 unsigned long long coremem;
5181 if (!p)
5182 return -EINVAL;
5183
5184 coremem = memparse(p, &p);
7e63efef 5185 *core = coremem >> PAGE_SHIFT;
2a1e274a 5186
7e63efef 5187 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
5188 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5189
5190 return 0;
5191}
ed7ed365 5192
7e63efef
MG
5193/*
5194 * kernelcore=size sets the amount of memory for use for allocations that
5195 * cannot be reclaimed or migrated.
5196 */
5197static int __init cmdline_parse_kernelcore(char *p)
5198{
5199 return cmdline_parse_core(p, &required_kernelcore);
5200}
5201
5202/*
5203 * movablecore=size sets the amount of memory for use for allocations that
5204 * can be reclaimed or migrated.
5205 */
5206static int __init cmdline_parse_movablecore(char *p)
5207{
5208 return cmdline_parse_core(p, &required_movablecore);
5209}
5210
ed7ed365 5211early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 5212early_param("movablecore", cmdline_parse_movablecore);
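/*
 * Example (illustrative): booting with "kernelcore=512M" keeps 512MiB of
 * memory usable by unmovable allocations and turns the remainder into
 * ZONE_MOVABLE; "movablecore=1G" instead requests at least 1GiB of
 * movable memory and derives the kernelcore amount from the total.
 */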
ed7ed365 5213
27168d38
TC
5214/**
5215 * movablemem_map_overlap() - Check if a range overlaps movablemem_map.map[].
5216 * @start_pfn: start pfn of the range to be checked
5217 * @end_pfn: end pfn of the range to be checked (exclusive)
5218 *
5219 * This function checks if a given memory range [start_pfn, end_pfn) overlaps
5220 * the movablemem_map.map[] array.
5221 *
5222 * Return: index of the first overlapped element in movablemem_map.map[]
 5223 * or -1 if there is no overlap.
5224 */
5225int __init movablemem_map_overlap(unsigned long start_pfn,
5226 unsigned long end_pfn)
5227{
5228 int overlap;
5229
5230 if (!movablemem_map.nr_map)
5231 return -1;
5232
5233 for (overlap = 0; overlap < movablemem_map.nr_map; overlap++)
5234 if (start_pfn < movablemem_map.map[overlap].end_pfn)
5235 break;
5236
5237 if (overlap == movablemem_map.nr_map ||
5238 end_pfn <= movablemem_map.map[overlap].start_pfn)
5239 return -1;
5240
5241 return overlap;
5242}
5243
34b71f1e
TC
5244/**
5245 * insert_movablemem_map - Insert a memory range in to movablemem_map.map.
5246 * @start_pfn: start pfn of the range
5247 * @end_pfn: end pfn of the range
5248 *
 5249 * This function will also merge overlapping ranges, and sort the array
 5250 * by start_pfn in monotonically increasing order.
5251 */
27168d38
TC
5252void __init insert_movablemem_map(unsigned long start_pfn,
5253 unsigned long end_pfn)
34b71f1e
TC
5254{
5255 int pos, overlap;
5256
5257 /*
5258 * pos will be at the 1st overlapped range, or the position
5259 * where the element should be inserted.
5260 */
5261 for (pos = 0; pos < movablemem_map.nr_map; pos++)
5262 if (start_pfn <= movablemem_map.map[pos].end_pfn)
5263 break;
5264
5265 /* If there is no overlapped range, just insert the element. */
5266 if (pos == movablemem_map.nr_map ||
5267 end_pfn < movablemem_map.map[pos].start_pfn) {
5268 /*
5269 * If pos is not the end of array, we need to move all
5270 * the rest elements backward.
5271 */
5272 if (pos < movablemem_map.nr_map)
5273 memmove(&movablemem_map.map[pos+1],
5274 &movablemem_map.map[pos],
5275 sizeof(struct movablemem_entry) *
5276 (movablemem_map.nr_map - pos));
5277 movablemem_map.map[pos].start_pfn = start_pfn;
5278 movablemem_map.map[pos].end_pfn = end_pfn;
5279 movablemem_map.nr_map++;
5280 return;
5281 }
5282
5283 /* overlap will be at the last overlapped range */
5284 for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
5285 if (end_pfn < movablemem_map.map[overlap].start_pfn)
5286 break;
5287
5288 /*
5289 * If there are more ranges overlapped, we need to merge them,
5290 * and move the rest elements forward.
5291 */
5292 overlap--;
5293 movablemem_map.map[pos].start_pfn = min(start_pfn,
5294 movablemem_map.map[pos].start_pfn);
5295 movablemem_map.map[pos].end_pfn = max(end_pfn,
5296 movablemem_map.map[overlap].end_pfn);
5297
5298 if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
5299 memmove(&movablemem_map.map[pos+1],
5300 &movablemem_map.map[overlap+1],
5301 sizeof(struct movablemem_entry) *
5302 (movablemem_map.nr_map - overlap - 1));
5303
5304 movablemem_map.nr_map -= overlap - pos;
5305}
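/*
 * Worked example (illustrative): starting from an empty map, inserting
 * [5, 10) and then [8, 12) leaves the single merged entry [5, 12);
 * inserting [20, 25) afterwards appends it, keeping map[] sorted by
 * start_pfn.
 */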
5306
5307/**
5308 * movablemem_map_add_region - Add a memory range into movablemem_map.
5309 * @start: physical start address of range
 5310 * @size: size of the range
 5311 *
 5312 * This function transforms the physical addresses into pfns, and then adds the
5313 * range into movablemem_map by calling insert_movablemem_map().
5314 */
5315static void __init movablemem_map_add_region(u64 start, u64 size)
5316{
5317 unsigned long start_pfn, end_pfn;
5318
5319 /* In case size == 0 or start + size overflows */
5320 if (start + size <= start)
5321 return;
5322
5323 if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
5324 pr_err("movablemem_map: too many entries;"
5325 " ignoring [mem %#010llx-%#010llx]\n",
5326 (unsigned long long) start,
5327 (unsigned long long) (start + size - 1));
5328 return;
5329 }
5330
5331 start_pfn = PFN_DOWN(start);
5332 end_pfn = PFN_UP(start + size);
5333 insert_movablemem_map(start_pfn, end_pfn);
5334}
5335
5336/*
5337 * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
5338 * @p: The boot option of the following format:
5339 * movablemem_map=nn[KMG]@ss[KMG]
5340 *
5341 * This option sets the memory range [ss, ss+nn) to be used as movable memory.
5342 *
5343 * Return: 0 on success or -EINVAL on failure.
5344 */
5345static int __init cmdline_parse_movablemem_map(char *p)
5346{
5347 char *oldp;
5348 u64 start_at, mem_size;
5349
5350 if (!p)
5351 goto err;
5352
01a178a9
TC
5353 if (!strcmp(p, "acpi"))
5354 movablemem_map.acpi = true;
5355
5356 /*
 5357 * If the user decides to use info from the BIOS, all the other
 5358 * user-specified ranges will be ignored.
5359 */
5360 if (movablemem_map.acpi) {
5361 if (movablemem_map.nr_map) {
5362 memset(movablemem_map.map, 0,
5363 sizeof(struct movablemem_entry)
5364 * movablemem_map.nr_map);
5365 movablemem_map.nr_map = 0;
5366 }
5367 return 0;
5368 }
5369
34b71f1e
TC
5370 oldp = p;
5371 mem_size = memparse(p, &p);
5372 if (p == oldp)
5373 goto err;
5374
5375 if (*p == '@') {
5376 oldp = ++p;
5377 start_at = memparse(p, &p);
5378 if (p == oldp || *p != '\0')
5379 goto err;
5380
5381 movablemem_map_add_region(start_at, mem_size);
5382 return 0;
5383 }
5384err:
5385 return -EINVAL;
5386}
5387early_param("movablemem_map", cmdline_parse_movablemem_map);
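/*
 * Example (illustrative): "movablemem_map=4G@8G" marks the physical range
 * [8GiB, 12GiB) as movable, while "movablemem_map=acpi" drops any such
 * user-specified ranges and takes the movable-memory layout from the
 * firmware instead.
 */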
5388
0ee332c1 5389#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5390
0e0b864e 5391/**
88ca3b94
RD
5392 * set_dma_reserve - set the specified number of pages reserved in the first zone
5393 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
5394 *
5395 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 5396 * In the DMA zone, a significant percentage may be consumed by the kernel image
5397 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
5398 * function may optionally be used to account for unfreeable pages in the
5399 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5400 * smaller per-cpu batchsize.
0e0b864e
MG
5401 */
5402void __init set_dma_reserve(unsigned long new_dma_reserve)
5403{
5404 dma_reserve = new_dma_reserve;
5405}
5406
1da177e4
LT
5407void __init free_area_init(unsigned long *zones_size)
5408{
9109fb7b 5409 free_area_init_node(0, zones_size,
1da177e4
LT
5410 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5411}
1da177e4 5412
1da177e4
LT
5413static int page_alloc_cpu_notify(struct notifier_block *self,
5414 unsigned long action, void *hcpu)
5415{
5416 int cpu = (unsigned long)hcpu;
1da177e4 5417
8bb78442 5418 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
f0cb3c76 5419 lru_add_drain_cpu(cpu);
9f8f2172
CL
5420 drain_pages(cpu);
5421
5422 /*
5423 * Spill the event counters of the dead processor
5424 * into the current processors event counters.
5425 * This artificially elevates the count of the current
5426 * processor.
5427 */
f8891e5e 5428 vm_events_fold_cpu(cpu);
9f8f2172
CL
5429
5430 /*
5431 * Zero the differential counters of the dead processor
5432 * so that the vm statistics are consistent.
5433 *
5434 * This is only okay since the processor is dead and cannot
5435 * race with what we are doing.
5436 */
2244b95a 5437 refresh_cpu_vm_stats(cpu);
1da177e4
LT
5438 }
5439 return NOTIFY_OK;
5440}
1da177e4
LT
5441
5442void __init page_alloc_init(void)
5443{
5444 hotcpu_notifier(page_alloc_cpu_notify, 0);
5445}
5446
cb45b0e9
HA
5447/*
5448 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
5449 * or min_free_kbytes changes.
5450 */
5451static void calculate_totalreserve_pages(void)
5452{
5453 struct pglist_data *pgdat;
5454 unsigned long reserve_pages = 0;
2f6726e5 5455 enum zone_type i, j;
cb45b0e9
HA
5456
5457 for_each_online_pgdat(pgdat) {
5458 for (i = 0; i < MAX_NR_ZONES; i++) {
5459 struct zone *zone = pgdat->node_zones + i;
5460 unsigned long max = 0;
5461
5462 /* Find valid and maximum lowmem_reserve in the zone */
5463 for (j = i; j < MAX_NR_ZONES; j++) {
5464 if (zone->lowmem_reserve[j] > max)
5465 max = zone->lowmem_reserve[j];
5466 }
5467
41858966
MG
5468 /* we treat the high watermark as reserved pages. */
5469 max += high_wmark_pages(zone);
cb45b0e9 5470
b40da049
JL
5471 if (max > zone->managed_pages)
5472 max = zone->managed_pages;
cb45b0e9 5473 reserve_pages += max;
ab8fabd4
JW
5474 /*
5475 * Lowmem reserves are not available to
5476 * GFP_HIGHUSER page cache allocations and
5477 * kswapd tries to balance zones to their high
5478 * watermark. As a result, neither should be
5479 * regarded as dirtyable memory, to prevent a
5480 * situation where reclaim has to clean pages
5481 * in order to balance the zones.
5482 */
5483 zone->dirty_balance_reserve = max;
cb45b0e9
HA
5484 }
5485 }
ab8fabd4 5486 dirty_balance_reserve = reserve_pages;
cb45b0e9
HA
5487 totalreserve_pages = reserve_pages;
5488}
5489
1da177e4
LT
5490/*
5491 * setup_per_zone_lowmem_reserve - called whenever
5492 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone
5493 * has a correct pages reserved value, so an adequate number of
5494 * pages are left in the zone after a successful __alloc_pages().
5495 */
5496static void setup_per_zone_lowmem_reserve(void)
5497{
5498 struct pglist_data *pgdat;
2f6726e5 5499 enum zone_type j, idx;
1da177e4 5500
ec936fc5 5501 for_each_online_pgdat(pgdat) {
1da177e4
LT
5502 for (j = 0; j < MAX_NR_ZONES; j++) {
5503 struct zone *zone = pgdat->node_zones + j;
b40da049 5504 unsigned long managed_pages = zone->managed_pages;
1da177e4
LT
5505
5506 zone->lowmem_reserve[j] = 0;
5507
2f6726e5
CL
5508 idx = j;
5509 while (idx) {
1da177e4
LT
5510 struct zone *lower_zone;
5511
2f6726e5
CL
5512 idx--;
5513
1da177e4
LT
5514 if (sysctl_lowmem_reserve_ratio[idx] < 1)
5515 sysctl_lowmem_reserve_ratio[idx] = 1;
5516
5517 lower_zone = pgdat->node_zones + idx;
b40da049 5518 lower_zone->lowmem_reserve[j] = managed_pages /
1da177e4 5519 sysctl_lowmem_reserve_ratio[idx];
b40da049 5520 managed_pages += lower_zone->managed_pages;
1da177e4
LT
5521 }
5522 }
5523 }
cb45b0e9
HA
5524
5525 /* update totalreserve_pages */
5526 calculate_totalreserve_pages();
1da177e4
LT
5527}
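/*
 * Illustrative userspace sketch (not part of this file) of the reserve
 * arithmetic above for a three-zone DMA/NORMAL/HIGHMEM layout; the
 * managed-page counts and the ratios (256 and 32, matching the common
 * defaults of this era) are example values only.
 */
#include <stdio.h>

int main(void)
{
        unsigned long managed[3] = { 4096, 225280, 32768 };
        unsigned long ratio[2] = { 256, 32 };
        int j = 2, idx;                 /* j indexes HIGHMEM */
        unsigned long pages = managed[j];

        /* walk downward, accumulating the pages of the zones above */
        for (idx = j - 1; idx >= 0; idx--) {
                printf("zone %d reserve against zone %d allocations: %lu pages\n",
                       idx, j, pages / ratio[idx]);
                pages += managed[idx];
        }
        return 0;                       /* 1024 pages for NORMAL, 1008 for DMA */
}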
5528
cfd3da1e 5529static void __setup_per_zone_wmarks(void)
1da177e4
LT
5530{
5531 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5532 unsigned long lowmem_pages = 0;
5533 struct zone *zone;
5534 unsigned long flags;
5535
5536 /* Calculate total number of !ZONE_HIGHMEM pages */
5537 for_each_zone(zone) {
5538 if (!is_highmem(zone))
b40da049 5539 lowmem_pages += zone->managed_pages;
1da177e4
LT
5540 }
5541
5542 for_each_zone(zone) {
ac924c60
AM
5543 u64 tmp;
5544
1125b4e3 5545 spin_lock_irqsave(&zone->lock, flags);
b40da049 5546 tmp = (u64)pages_min * zone->managed_pages;
ac924c60 5547 do_div(tmp, lowmem_pages);
1da177e4
LT
5548 if (is_highmem(zone)) {
5549 /*
669ed175
NP
5550 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5551 * need highmem pages, so cap pages_min to a small
5552 * value here.
5553 *
41858966 5554 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
669ed175
NP
 5555 * deltas control async page reclaim, and so should
5556 * not be capped for highmem.
1da177e4 5557 */
90ae8d67 5558 unsigned long min_pages;
1da177e4 5559
b40da049 5560 min_pages = zone->managed_pages / 1024;
90ae8d67 5561 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
41858966 5562 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 5563 } else {
669ed175
NP
5564 /*
5565 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
5566 * proportionate to the zone's size.
5567 */
41858966 5568 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
5569 }
5570
41858966
MG
5571 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
5572 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
49f223a9 5573
56fd56b8 5574 setup_zone_migrate_reserve(zone);
1125b4e3 5575 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 5576 }
cb45b0e9
HA
5577
5578 /* update totalreserve_pages */
5579 calculate_totalreserve_pages();
1da177e4
LT
5580}
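/*
 * Illustrative userspace sketch (not part of this file) of the lowmem
 * watermark arithmetic above, assuming 4KiB pages (PAGE_SHIFT == 12),
 * min_free_kbytes = 4096 and a single zone owning all of lowmem.
 */
#include <stdio.h>

int main(void)
{
        unsigned long min_free_kbytes = 4096;
        unsigned long managed_pages = 262144;           /* 1GiB of 4KiB pages */
        unsigned long lowmem_pages = managed_pages;     /* one-zone system */
        unsigned long pages_min = min_free_kbytes >> (12 - 10);
        unsigned long tmp = pages_min * managed_pages / lowmem_pages;

        printf("WMARK_MIN:  %lu pages\n", tmp);                 /* 1024 */
        printf("WMARK_LOW:  %lu pages\n", tmp + (tmp >> 2));    /* 1280 */
        printf("WMARK_HIGH: %lu pages\n", tmp + (tmp >> 1));    /* 1536 */
        return 0;
}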
5581
cfd3da1e
MG
5582/**
5583 * setup_per_zone_wmarks - called when min_free_kbytes changes
5584 * or when memory is hot-{added|removed}
5585 *
5586 * Ensures that the watermark[min,low,high] values for each zone are set
5587 * correctly with respect to min_free_kbytes.
5588 */
5589void setup_per_zone_wmarks(void)
5590{
5591 mutex_lock(&zonelists_mutex);
5592 __setup_per_zone_wmarks();
5593 mutex_unlock(&zonelists_mutex);
5594}
5595
55a4462a 5596/*
556adecb
RR
5597 * The inactive anon list should be small enough that the VM never has to
5598 * do too much work, but large enough that each inactive page has a chance
5599 * to be referenced again before it is swapped out.
5600 *
5601 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5602 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5603 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5604 * the anonymous pages are kept on the inactive list.
5605 *
5606 * total target max
5607 * memory ratio inactive anon
5608 * -------------------------------------
5609 * 10MB 1 5MB
5610 * 100MB 1 50MB
5611 * 1GB 3 250MB
5612 * 10GB 10 0.9GB
5613 * 100GB 31 3GB
5614 * 1TB 101 10GB
5615 * 10TB 320 32GB
5616 */
1b79acc9 5617static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
556adecb 5618{
96cb4df5 5619 unsigned int gb, ratio;
556adecb 5620
96cb4df5 5621 /* Zone size in gigabytes */
b40da049 5622 gb = zone->managed_pages >> (30 - PAGE_SHIFT);
96cb4df5 5623 if (gb)
556adecb 5624 ratio = int_sqrt(10 * gb);
96cb4df5
MK
5625 else
5626 ratio = 1;
556adecb 5627
96cb4df5
MK
5628 zone->inactive_ratio = ratio;
5629}
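/*
 * Illustrative userspace sketch (not part of this file) reproducing the
 * ratio column of the table above; a naive loop stands in for the
 * kernel's int_sqrt(), and zones under 1GiB fall back to ratio 1 as in
 * the code above.
 */
#include <stdio.h>

static unsigned int int_sqrt_approx(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

int main(void)
{
        unsigned long sizes_gb[] = { 1, 10, 100, 1024, 10240 };
        int i;

        for (i = 0; i < 5; i++)         /* prints 3, 10, 31, 101, 320 */
                printf("%5luGB -> inactive_ratio %u\n", sizes_gb[i],
                       int_sqrt_approx(10 * sizes_gb[i]));
        return 0;
}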
556adecb 5630
839a4fcc 5631static void __meminit setup_per_zone_inactive_ratio(void)
96cb4df5
MK
5632{
5633 struct zone *zone;
5634
5635 for_each_zone(zone)
5636 calculate_zone_inactive_ratio(zone);
556adecb
RR
5637}
5638
1da177e4
LT
5639/*
5640 * Initialise min_free_kbytes.
5641 *
5642 * For small machines we want it small (128k min). For large machines
5643 * we want it large (64MB max). But it is not linear, because network
5644 * bandwidth does not increase linearly with machine size. We use
5645 *
5646 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5647 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
5648 *
5649 * which yields
5650 *
5651 * 16MB: 512k
5652 * 32MB: 724k
5653 * 64MB: 1024k
5654 * 128MB: 1448k
5655 * 256MB: 2048k
5656 * 512MB: 2896k
5657 * 1024MB: 4096k
5658 * 2048MB: 5792k
5659 * 4096MB: 8192k
5660 * 8192MB: 11584k
5661 * 16384MB: 16384k
5662 */
1b79acc9 5663int __meminit init_per_zone_wmark_min(void)
1da177e4
LT
5664{
5665 unsigned long lowmem_kbytes;
5666
5667 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5668
5669 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5670 if (min_free_kbytes < 128)
5671 min_free_kbytes = 128;
5672 if (min_free_kbytes > 65536)
5673 min_free_kbytes = 65536;
bc75d33f 5674 setup_per_zone_wmarks();
a6cccdc3 5675 refresh_zone_stat_thresholds();
1da177e4 5676 setup_per_zone_lowmem_reserve();
556adecb 5677 setup_per_zone_inactive_ratio();
1da177e4
LT
5678 return 0;
5679}
bc75d33f 5680module_init(init_per_zone_wmark_min)
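/*
 * Illustrative userspace sketch (not part of this file) reproducing the
 * min_free_kbytes table above; compile with -lm for sqrt().
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
        unsigned long mb[] = { 16, 128, 1024, 16384 };
        int i;

        for (i = 0; i < 4; i++) {       /* prints 512k, 1448k, 4096k, 16384k */
                unsigned long lowmem_kbytes = mb[i] * 1024;
                unsigned long min_free =
                        (unsigned long)sqrt(lowmem_kbytes * 16.0);

                if (min_free < 128)
                        min_free = 128;
                if (min_free > 65536)
                        min_free = 65536;
                printf("%6luMB: %luk\n", mb[i], min_free);
        }
        return 0;
}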
1da177e4
LT
5681
5682/*
5683 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5684 * that we can call two helper functions whenever min_free_kbytes
5685 * changes.
5686 */
5687int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
8d65af78 5688 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 5689{
8d65af78 5690 proc_dointvec(table, write, buffer, length, ppos);
3b1d92c5 5691 if (write)
bc75d33f 5692 setup_per_zone_wmarks();
1da177e4
LT
5693 return 0;
5694}
5695
9614634f
CL
5696#ifdef CONFIG_NUMA
5697int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5698 void __user *buffer, size_t *length, loff_t *ppos)
9614634f
CL
5699{
5700 struct zone *zone;
5701 int rc;
5702
8d65af78 5703 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
9614634f
CL
5704 if (rc)
5705 return rc;
5706
5707 for_each_zone(zone)
b40da049 5708 zone->min_unmapped_pages = (zone->managed_pages *
9614634f
CL
5709 sysctl_min_unmapped_ratio) / 100;
5710 return 0;
5711}
0ff38490
CL
5712
5713int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5714 void __user *buffer, size_t *length, loff_t *ppos)
0ff38490
CL
5715{
5716 struct zone *zone;
5717 int rc;
5718
8d65af78 5719 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
5720 if (rc)
5721 return rc;
5722
5723 for_each_zone(zone)
b40da049 5724 zone->min_slab_pages = (zone->managed_pages *
0ff38490
CL
5725 sysctl_min_slab_ratio) / 100;
5726 return 0;
5727}
9614634f
CL
5728#endif
5729
1da177e4
LT
5730/*
5731 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5732 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5733 * whenever sysctl_lowmem_reserve_ratio changes.
5734 *
5735 * The reserve ratio obviously has absolutely no relation with the
41858966 5736 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
 5737 * as a function of the boot-time zone sizes.
5738 */
5739int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5740 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 5741{
8d65af78 5742 proc_dointvec_minmax(table, write, buffer, length, ppos);
1da177e4
LT
5743 setup_per_zone_lowmem_reserve();
5744 return 0;
5745}
5746
8ad4b1fb
RS
5747/*
5748 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5749 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
5750 * can have before it gets flushed back to buddy allocator.
5751 */
5752
5753int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
8d65af78 5754 void __user *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
5755{
5756 struct zone *zone;
5757 unsigned int cpu;
5758 int ret;
5759
8d65af78 5760 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
93278814 5761 if (!write || (ret < 0))
8ad4b1fb 5762 return ret;
364df0eb 5763 for_each_populated_zone(zone) {
99dcc3e5 5764 for_each_possible_cpu(cpu) {
8ad4b1fb 5765 unsigned long high;
b40da049 5766 high = zone->managed_pages / percpu_pagelist_fraction;
99dcc3e5
CL
5767 setup_pagelist_highmark(
5768 per_cpu_ptr(zone->pageset, cpu), high);
8ad4b1fb
RS
5769 }
5770 }
5771 return 0;
5772}
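/*
 * Example (illustrative): "echo 8 > /proc/sys/vm/percpu_pagelist_fraction"
 * caps each per-cpu pagelist at managed_pages/8 of its zone; a larger
 * fraction gives a smaller cap, so hot pages drain back to the buddy
 * allocator sooner.
 */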
5773
f034b5d4 5774int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
5775
5776#ifdef CONFIG_NUMA
5777static int __init set_hashdist(char *str)
5778{
5779 if (!str)
5780 return 0;
5781 hashdist = simple_strtoul(str, &str, 0);
5782 return 1;
5783}
5784__setup("hashdist=", set_hashdist);
5785#endif
5786
5787/*
5788 * allocate a large system hash table from bootmem
5789 * - it is assumed that the hash table must contain an exact power-of-2
5790 * quantity of entries
5791 * - limit is the number of hash buckets, not the total allocation size
5792 */
5793void *__init alloc_large_system_hash(const char *tablename,
5794 unsigned long bucketsize,
5795 unsigned long numentries,
5796 int scale,
5797 int flags,
5798 unsigned int *_hash_shift,
5799 unsigned int *_hash_mask,
31fe62b9
TB
5800 unsigned long low_limit,
5801 unsigned long high_limit)
1da177e4 5802{
31fe62b9 5803 unsigned long long max = high_limit;
1da177e4
LT
5804 unsigned long log2qty, size;
5805 void *table = NULL;
5806
5807 /* allow the kernel cmdline to have a say */
5808 if (!numentries) {
5809 /* round applicable memory size up to nearest megabyte */
04903664 5810 numentries = nr_kernel_pages;
1da177e4
LT
5811 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5812 numentries >>= 20 - PAGE_SHIFT;
5813 numentries <<= 20 - PAGE_SHIFT;
5814
5815 /* limit to 1 bucket per 2^scale bytes of low memory */
5816 if (scale > PAGE_SHIFT)
5817 numentries >>= (scale - PAGE_SHIFT);
5818 else
5819 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
5820
5821 /* Make sure we've got at least a 0-order allocation.. */
2c85f51d
JB
5822 if (unlikely(flags & HASH_SMALL)) {
5823 /* Makes no sense without HASH_EARLY */
5824 WARN_ON(!(flags & HASH_EARLY));
5825 if (!(numentries >> *_hash_shift)) {
5826 numentries = 1UL << *_hash_shift;
5827 BUG_ON(!numentries);
5828 }
5829 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 5830 numentries = PAGE_SIZE / bucketsize;
1da177e4 5831 }
6e692ed3 5832 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
5833
5834 /* limit allocation size to 1/16 total memory by default */
5835 if (max == 0) {
5836 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5837 do_div(max, bucketsize);
5838 }
074b8517 5839 max = min(max, 0x80000000ULL);
1da177e4 5840
31fe62b9
TB
5841 if (numentries < low_limit)
5842 numentries = low_limit;
1da177e4
LT
5843 if (numentries > max)
5844 numentries = max;
5845
f0d1b0b3 5846 log2qty = ilog2(numentries);
1da177e4
LT
5847
5848 do {
5849 size = bucketsize << log2qty;
5850 if (flags & HASH_EARLY)
74768ed8 5851 table = alloc_bootmem_nopanic(size);
1da177e4
LT
5852 else if (hashdist)
5853 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5854 else {
1037b83b
ED
5855 /*
5856 * If bucketsize is not a power-of-two, we may free
a1dd268c
MG
 5857 * some pages at the end of the hash table, which
 5858 * alloc_pages_exact() does automatically
1037b83b 5859 */
264ef8a9 5860 if (get_order(size) < MAX_ORDER) {
a1dd268c 5861 table = alloc_pages_exact(size, GFP_ATOMIC);
264ef8a9
CM
5862 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5863 }
1da177e4
LT
5864 }
5865 } while (!table && size > PAGE_SIZE && --log2qty);
5866
5867 if (!table)
5868 panic("Failed to allocate %s hash table\n", tablename);
5869
f241e660 5870 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
1da177e4 5871 tablename,
f241e660 5872 (1UL << log2qty),
f0d1b0b3 5873 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
5874 size);
5875
5876 if (_hash_shift)
5877 *_hash_shift = log2qty;
5878 if (_hash_mask)
5879 *_hash_mask = (1 << log2qty) - 1;
5880
5881 return table;
5882}
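/*
 * Illustrative userspace sketch (not part of this file) of the sizing
 * logic above, assuming 4KiB pages, 1GiB of kernel pages, scale = 14 and
 * a 16-byte bucket (example values only).
 */
#include <stdio.h>

int main(void)
{
        unsigned long numentries = 262144;      /* 1GiB of 4KiB pages */
        unsigned long bucketsize = 16, log2qty = 0, size;
        int scale = 14;

        /* round the memory size up to the nearest megabyte of pages */
        numentries += (1UL << (20 - 12)) - 1;
        numentries >>= 20 - 12;
        numentries <<= 20 - 12;

        /* one bucket per 2^scale bytes of low memory */
        numentries >>= scale - 12;

        /* round up to a power of two, as the kernel requires */
        while ((1UL << log2qty) < numentries)
                log2qty++;

        size = bucketsize << log2qty;
        /* prints 65536 entries, 1048576 bytes */
        printf("entries: %lu, table size: %lu bytes\n", 1UL << log2qty, size);
        return 0;
}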
a117e66e 5883
835c134e
MG
5884/* Return a pointer to the bitmap storing bits affecting a block of pages */
5885static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5886 unsigned long pfn)
5887{
5888#ifdef CONFIG_SPARSEMEM
5889 return __pfn_to_section(pfn)->pageblock_flags;
5890#else
5891 return zone->pageblock_flags;
5892#endif /* CONFIG_SPARSEMEM */
5893}
5894
5895static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5896{
5897#ifdef CONFIG_SPARSEMEM
5898 pfn &= (PAGES_PER_SECTION-1);
d9c23400 5899 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e 5900#else
c060f943 5901 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
d9c23400 5902 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
5903#endif /* CONFIG_SPARSEMEM */
5904}
5905
5906/**
d9c23400 5907 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e
MG
5908 * @page: The page within the block of interest
5909 * @start_bitidx: The first bit of interest to retrieve
5910 * @end_bitidx: The last bit of interest
5911 * returns pageblock_bits flags
5912 */
5913unsigned long get_pageblock_flags_group(struct page *page,
5914 int start_bitidx, int end_bitidx)
5915{
5916 struct zone *zone;
5917 unsigned long *bitmap;
5918 unsigned long pfn, bitidx;
5919 unsigned long flags = 0;
5920 unsigned long value = 1;
5921
5922 zone = page_zone(page);
5923 pfn = page_to_pfn(page);
5924 bitmap = get_pageblock_bitmap(zone, pfn);
5925 bitidx = pfn_to_bitidx(zone, pfn);
5926
5927 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5928 if (test_bit(bitidx + start_bitidx, bitmap))
5929 flags |= value;
6220ec78 5930
835c134e
MG
5931 return flags;
5932}
5933
5934/**
d9c23400 5935 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e
MG
5936 * @page: The page within the block of interest
5937 * @start_bitidx: The first bit of interest
5938 * @end_bitidx: The last bit of interest
5939 * @flags: The flags to set
5940 */
5941void set_pageblock_flags_group(struct page *page, unsigned long flags,
5942 int start_bitidx, int end_bitidx)
5943{
5944 struct zone *zone;
5945 unsigned long *bitmap;
5946 unsigned long pfn, bitidx;
5947 unsigned long value = 1;
5948
5949 zone = page_zone(page);
5950 pfn = page_to_pfn(page);
5951 bitmap = get_pageblock_bitmap(zone, pfn);
5952 bitidx = pfn_to_bitidx(zone, pfn);
108bcc96 5953 VM_BUG_ON(!zone_spans_pfn(zone, pfn));
835c134e
MG
5954
5955 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5956 if (flags & value)
5957 __set_bit(bitidx + start_bitidx, bitmap);
5958 else
5959 __clear_bit(bitidx + start_bitidx, bitmap);
5960}
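/*
 * Illustrative userspace sketch (not part of this file) of the bit
 * indexing above, assuming pageblock_order == 10 and NR_PAGEBLOCK_BITS
 * == 4 (a common !CONFIG_SPARSEMEM configuration of this era).
 */
#include <stdio.h>

int main(void)
{
        unsigned long zone_start_pfn = 0, pfn = 5000;
        unsigned long pageblock_order = 10, nr_pageblock_bits = 4;
        unsigned long bitidx;

        /* offset into the zone, rounded down to whole pageblocks */
        pfn -= zone_start_pfn & ~((1UL << pageblock_order) - 1);
        bitidx = (pfn >> pageblock_order) * nr_pageblock_bits;

        /* pfn 5000 lies in pageblock 4, whose flags start at bit 16 */
        printf("bitidx: %lu\n", bitidx);
        return 0;
}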
a5d76b54
KH
5961
5962/*
80934513
MK
 5963 * This function checks whether the pageblock includes unmovable pages or not.
 5964 * If @count is not zero, it is okay to include up to @count unmovable pages.
 5965 *
 5966 * A PageLRU check without isolation or lru_lock could race, so a
 5967 * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
 5968 * expect this function to be exact.
a5d76b54 5969 */
b023f468
WC
5970bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
5971 bool skip_hwpoisoned_pages)
49ac8255
KH
5972{
5973 unsigned long pfn, iter, found;
47118af0
MN
5974 int mt;
5975
49ac8255
KH
5976 /*
5977 * For avoiding noise data, lru_add_drain_all() should be called
80934513 5978 * If ZONE_MOVABLE, the zone never contains unmovable pages
49ac8255
KH
5979 */
5980 if (zone_idx(zone) == ZONE_MOVABLE)
80934513 5981 return false;
47118af0
MN
5982 mt = get_pageblock_migratetype(page);
5983 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
80934513 5984 return false;
49ac8255
KH
5985
5986 pfn = page_to_pfn(page);
5987 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5988 unsigned long check = pfn + iter;
5989
29723fcc 5990 if (!pfn_valid_within(check))
49ac8255 5991 continue;
29723fcc 5992
49ac8255 5993 page = pfn_to_page(check);
97d255c8
MK
5994 /*
 5995 * We can't use page_count without pinning a page
 5996 * because another CPU can free the compound page.
 5997 * This check already skips compound tails of THP
 5998 * because their page->_count is zero at all times.
5999 */
6000 if (!atomic_read(&page->_count)) {
49ac8255
KH
6001 if (PageBuddy(page))
6002 iter += (1 << page_order(page)) - 1;
6003 continue;
6004 }
97d255c8 6005
b023f468
WC
6006 /*
 6007 * The HWPoisoned page may not be in the buddy system, and
6008 * page_count() is not 0.
6009 */
6010 if (skip_hwpoisoned_pages && PageHWPoison(page))
6011 continue;
6012
49ac8255
KH
6013 if (!PageLRU(page))
6014 found++;
6015 /*
 6016 * If there are RECLAIMABLE pages, we need to check them.
 6017 * But for now, memory offline itself doesn't call shrink_slab()
 6018 * and this still needs to be fixed.
 6019 */
 6020 /*
 6021 * If the page is not RAM, page_count() should be 0.
 6022 * We don't need further checks. This is a _used_ unmovable page.
 6023 *
 6024 * The problematic thing here is PG_reserved pages. PG_reserved
 6025 * is set on both memory hole pages and _used_ kernel
 6026 * pages at boot.
6027 */
6028 if (found > count)
80934513 6029 return true;
49ac8255 6030 }
80934513 6031 return false;
49ac8255
KH
6032}
6033
6034bool is_pageblock_removable_nolock(struct page *page)
6035{
656a0706
MH
6036 struct zone *zone;
6037 unsigned long pfn;
687875fb
MH
6038
6039 /*
6040 * We have to be careful here because we are iterating over memory
6041 * sections which are not zone aware so we might end up outside of
6042 * the zone but still within the section.
656a0706
MH
 6043 * We have to take care of the node as well. If the node is offline
6044 * its NODE_DATA will be NULL - see page_zone.
687875fb 6045 */
656a0706
MH
6046 if (!node_online(page_to_nid(page)))
6047 return false;
6048
6049 zone = page_zone(page);
6050 pfn = page_to_pfn(page);
108bcc96 6051 if (!zone_spans_pfn(zone, pfn))
687875fb
MH
6052 return false;
6053
b023f468 6054 return !has_unmovable_pages(zone, page, 0, true);
a5d76b54 6055}
0c0e6195 6056
041d3a8c
MN
6057#ifdef CONFIG_CMA
6058
6059static unsigned long pfn_max_align_down(unsigned long pfn)
6060{
6061 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6062 pageblock_nr_pages) - 1);
6063}
6064
6065static unsigned long pfn_max_align_up(unsigned long pfn)
6066{
6067 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6068 pageblock_nr_pages));
6069}
6070
041d3a8c 6071/* [start, end) must belong to a single zone. */
bb13ffeb
MG
6072static int __alloc_contig_migrate_range(struct compact_control *cc,
6073 unsigned long start, unsigned long end)
041d3a8c
MN
6074{
6075 /* This function is based on compact_zone() from compaction.c. */
beb51eaa 6076 unsigned long nr_reclaimed;
041d3a8c
MN
6077 unsigned long pfn = start;
6078 unsigned int tries = 0;
6079 int ret = 0;
6080
be49a6e1 6081 migrate_prep();
041d3a8c 6082
bb13ffeb 6083 while (pfn < end || !list_empty(&cc->migratepages)) {
041d3a8c
MN
6084 if (fatal_signal_pending(current)) {
6085 ret = -EINTR;
6086 break;
6087 }
6088
bb13ffeb
MG
6089 if (list_empty(&cc->migratepages)) {
6090 cc->nr_migratepages = 0;
6091 pfn = isolate_migratepages_range(cc->zone, cc,
e46a2879 6092 pfn, end, true);
041d3a8c
MN
6093 if (!pfn) {
6094 ret = -EINTR;
6095 break;
6096 }
6097 tries = 0;
6098 } else if (++tries == 5) {
6099 ret = ret < 0 ? ret : -EBUSY;
6100 break;
6101 }
6102
beb51eaa
MK
6103 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6104 &cc->migratepages);
6105 cc->nr_migratepages -= nr_reclaimed;
02c6de8d 6106
9c620e2b
HD
6107 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6108 0, MIGRATE_SYNC, MR_CMA);
041d3a8c 6109 }
2a6f5124
SP
6110 if (ret < 0) {
6111 putback_movable_pages(&cc->migratepages);
6112 return ret;
6113 }
6114 return 0;
041d3a8c
MN
6115}
6116
6117/**
6118 * alloc_contig_range() -- tries to allocate given range of pages
6119 * @start: start PFN to allocate
6120 * @end: one-past-the-last PFN to allocate
0815f3d8
MN
 6121 * @migratetype: migratetype of the underlying pageblocks (either
6122 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6123 * in range must have the same migratetype and it must
6124 * be either of the two.
041d3a8c
MN
6125 *
6126 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6127 * aligned, however it's the caller's responsibility to guarantee that
 6128 * we are the only thread that changes the migrate type of the pageblocks the
6129 * pages fall in.
6130 *
6131 * The PFN range must belong to a single zone.
6132 *
 6133 * Returns zero on success or a negative error code. On success all
 6134 * pages whose PFN is in [start, end) are allocated for the caller and
6135 * need to be freed with free_contig_range().
6136 */
0815f3d8
MN
6137int alloc_contig_range(unsigned long start, unsigned long end,
6138 unsigned migratetype)
041d3a8c 6139{
041d3a8c
MN
6140 unsigned long outer_start, outer_end;
6141 int ret = 0, order;
6142
bb13ffeb
MG
6143 struct compact_control cc = {
6144 .nr_migratepages = 0,
6145 .order = -1,
6146 .zone = page_zone(pfn_to_page(start)),
6147 .sync = true,
6148 .ignore_skip_hint = true,
6149 };
6150 INIT_LIST_HEAD(&cc.migratepages);
6151
041d3a8c
MN
6152 /*
 6153 * What we do here is mark all pageblocks in the range as
 6154 * MIGRATE_ISOLATE. Because pageblock and max order pages may
 6155 * have different sizes, and due to the way the page allocator
 6156 * works, we align the range to the biggest of the two so
6157 * that page allocator won't try to merge buddies from
6158 * different pageblocks and change MIGRATE_ISOLATE to some
6159 * other migration type.
6160 *
6161 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
 6162 * migrate the pages from an unaligned range (i.e. pages that
6163 * we are interested in). This will put all the pages in
6164 * range back to page allocator as MIGRATE_ISOLATE.
6165 *
6166 * When this is done, we take the pages in range from page
6167 * allocator removing them from the buddy system. This way
6168 * page allocator will never consider using them.
6169 *
6170 * This lets us mark the pageblocks back as
6171 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6172 * aligned range but not in the unaligned, original range are
6173 * put back to page allocator so that buddy can use them.
6174 */
6175
6176 ret = start_isolate_page_range(pfn_max_align_down(start),
b023f468
WC
6177 pfn_max_align_up(end), migratetype,
6178 false);
041d3a8c 6179 if (ret)
86a595f9 6180 return ret;
041d3a8c 6181
bb13ffeb 6182 ret = __alloc_contig_migrate_range(&cc, start, end);
041d3a8c
MN
6183 if (ret)
6184 goto done;
6185
6186 /*
6187 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
6188 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6189 * more, all pages in [start, end) are free in page allocator.
6190 * What we are going to do is to allocate all pages from
6191 * [start, end) (that is remove them from page allocator).
6192 *
6193 * The only problem is that pages at the beginning and at the
 6194 * end of the interesting range may not be aligned with pages that
 6195 * the page allocator holds, i.e. they can be part of higher order
6196 * pages. Because of this, we reserve the bigger range and
6197 * once this is done free the pages we are not interested in.
6198 *
6199 * We don't have to hold zone->lock here because the pages are
6200 * isolated thus they won't get removed from buddy.
6201 */
6202
6203 lru_add_drain_all();
6204 drain_all_pages();
6205
6206 order = 0;
6207 outer_start = start;
6208 while (!PageBuddy(pfn_to_page(outer_start))) {
6209 if (++order >= MAX_ORDER) {
6210 ret = -EBUSY;
6211 goto done;
6212 }
6213 outer_start &= ~0UL << order;
6214 }
6215
6216 /* Make sure the range is really isolated. */
b023f468 6217 if (test_pages_isolated(outer_start, end, false)) {
041d3a8c
MN
6218 pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
6219 outer_start, end);
6220 ret = -EBUSY;
6221 goto done;
6222 }
6223
49f223a9
MS
6224
6225 /* Grab isolated pages from freelists. */
bb13ffeb 6226 outer_end = isolate_freepages_range(&cc, outer_start, end);
041d3a8c
MN
6227 if (!outer_end) {
6228 ret = -EBUSY;
6229 goto done;
6230 }
6231
6232 /* Free head and tail (if any) */
6233 if (start != outer_start)
6234 free_contig_range(outer_start, start - outer_start);
6235 if (end != outer_end)
6236 free_contig_range(end, outer_end - end);
6237
6238done:
6239 undo_isolate_page_range(pfn_max_align_down(start),
0815f3d8 6240 pfn_max_align_up(end), migratetype);
041d3a8c
MN
6241 return ret;
6242}
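/*
 * Usage sketch (illustrative, CMA-style caller): the pfn range must lie
 * in one zone and its pageblocks must already be MIGRATE_CMA or
 * MIGRATE_MOVABLE:
 *
 *	if (!alloc_contig_range(start_pfn, end_pfn, MIGRATE_CMA)) {
 *		... use pfn_to_page(start_pfn) ...
 *		free_contig_range(start_pfn, end_pfn - start_pfn);
 *	}
 */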
6243
6244void free_contig_range(unsigned long pfn, unsigned nr_pages)
6245{
bcc2b02f
MS
6246 unsigned int count = 0;
6247
6248 for (; nr_pages--; pfn++) {
6249 struct page *page = pfn_to_page(pfn);
6250
6251 count += page_count(page) != 1;
6252 __free_page(page);
6253 }
6254 WARN(count != 0, "%d pages are still in use!\n", count);
041d3a8c
MN
6255}
6256#endif
6257
4ed7e022
JL
6258#ifdef CONFIG_MEMORY_HOTPLUG
6259static int __meminit __zone_pcp_update(void *data)
6260{
6261 struct zone *zone = data;
6262 int cpu;
6263 unsigned long batch = zone_batchsize(zone), flags;
6264
6265 for_each_possible_cpu(cpu) {
6266 struct per_cpu_pageset *pset;
6267 struct per_cpu_pages *pcp;
6268
6269 pset = per_cpu_ptr(zone->pageset, cpu);
6270 pcp = &pset->pcp;
6271
6272 local_irq_save(flags);
6273 if (pcp->count > 0)
6274 free_pcppages_bulk(zone, pcp->count, pcp);
5a883813 6275 drain_zonestat(zone, pset);
4ed7e022
JL
6276 setup_pageset(pset, batch);
6277 local_irq_restore(flags);
6278 }
6279 return 0;
6280}
6281
6282void __meminit zone_pcp_update(struct zone *zone)
6283{
6284 stop_machine(__zone_pcp_update, zone, NULL);
6285}
6286#endif
6287
340175b7
JL
6288void zone_pcp_reset(struct zone *zone)
6289{
6290 unsigned long flags;
5a883813
MK
6291 int cpu;
6292 struct per_cpu_pageset *pset;
340175b7
JL
6293
6294 /* avoid races with drain_pages() */
6295 local_irq_save(flags);
6296 if (zone->pageset != &boot_pageset) {
5a883813
MK
6297 for_each_online_cpu(cpu) {
6298 pset = per_cpu_ptr(zone->pageset, cpu);
6299 drain_zonestat(zone, pset);
6300 }
340175b7
JL
6301 free_percpu(zone->pageset);
6302 zone->pageset = &boot_pageset;
6303 }
6304 local_irq_restore(flags);
6305}
6306
6dcd73d7 6307#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195
KH
6308/*
6309 * All pages in the range must be isolated before calling this.
6310 */
6311void
6312__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6313{
6314 struct page *page;
6315 struct zone *zone;
6316 int order, i;
6317 unsigned long pfn;
6318 unsigned long flags;
6319 /* find the first valid pfn */
6320 for (pfn = start_pfn; pfn < end_pfn; pfn++)
6321 if (pfn_valid(pfn))
6322 break;
6323 if (pfn == end_pfn)
6324 return;
6325 zone = page_zone(pfn_to_page(pfn));
6326 spin_lock_irqsave(&zone->lock, flags);
6327 pfn = start_pfn;
6328 while (pfn < end_pfn) {
6329 if (!pfn_valid(pfn)) {
6330 pfn++;
6331 continue;
6332 }
6333 page = pfn_to_page(pfn);
b023f468
WC
6334 /*
 6335 * The HWPoisoned page may not be in the buddy system, and
6336 * page_count() is not 0.
6337 */
6338 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6339 pfn++;
6340 SetPageReserved(page);
6341 continue;
6342 }
6343
0c0e6195
KH
6344 BUG_ON(page_count(page));
6345 BUG_ON(!PageBuddy(page));
6346 order = page_order(page);
6347#ifdef CONFIG_DEBUG_VM
6348 printk(KERN_INFO "remove from free list %lx %d %lx\n",
6349 pfn, 1 << order, end_pfn);
6350#endif
6351 list_del(&page->lru);
6352 rmv_page_order(page);
6353 zone->free_area[order].nr_free--;
0c0e6195
KH
6354 for (i = 0; i < (1 << order); i++)
6355 SetPageReserved((page+i));
6356 pfn += (1 << order);
6357 }
6358 spin_unlock_irqrestore(&zone->lock, flags);
6359}
6360#endif
8d22ba1b
WF
6361
6362#ifdef CONFIG_MEMORY_FAILURE
6363bool is_free_buddy_page(struct page *page)
6364{
6365 struct zone *zone = page_zone(page);
6366 unsigned long pfn = page_to_pfn(page);
6367 unsigned long flags;
6368 int order;
6369
6370 spin_lock_irqsave(&zone->lock, flags);
6371 for (order = 0; order < MAX_ORDER; order++) {
6372 struct page *page_head = page - (pfn & ((1 << order) - 1));
6373
6374 if (PageBuddy(page_head) && page_order(page_head) >= order)
6375 break;
6376 }
6377 spin_unlock_irqrestore(&zone->lock, flags);
6378
6379 return order < MAX_ORDER;
6380}
6381#endif
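/*
 * Illustrative userspace sketch (not part of this file) of the head
 * computation above: clearing the low `order' bits of a pfn yields the
 * first pfn of the order-sized block that would contain it.
 */
#include <stdio.h>

int main(void)
{
        unsigned long pfn = 0x12345;
        int order;

        for (order = 0; order < 4; order++)     /* 0x12345 0x12344 0x12344 0x12340 */
                printf("order %d head pfn: %#lx\n", order,
                       pfn - (pfn & ((1UL << order) - 1)));
        return 0;
}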
718a3821 6382
51300cef 6383static const struct trace_print_flags pageflag_names[] = {
718a3821
WF
6384 {1UL << PG_locked, "locked" },
6385 {1UL << PG_error, "error" },
6386 {1UL << PG_referenced, "referenced" },
6387 {1UL << PG_uptodate, "uptodate" },
6388 {1UL << PG_dirty, "dirty" },
6389 {1UL << PG_lru, "lru" },
6390 {1UL << PG_active, "active" },
6391 {1UL << PG_slab, "slab" },
6392 {1UL << PG_owner_priv_1, "owner_priv_1" },
6393 {1UL << PG_arch_1, "arch_1" },
6394 {1UL << PG_reserved, "reserved" },
6395 {1UL << PG_private, "private" },
6396 {1UL << PG_private_2, "private_2" },
6397 {1UL << PG_writeback, "writeback" },
6398#ifdef CONFIG_PAGEFLAGS_EXTENDED
6399 {1UL << PG_head, "head" },
6400 {1UL << PG_tail, "tail" },
6401#else
6402 {1UL << PG_compound, "compound" },
6403#endif
6404 {1UL << PG_swapcache, "swapcache" },
6405 {1UL << PG_mappedtodisk, "mappedtodisk" },
6406 {1UL << PG_reclaim, "reclaim" },
718a3821
WF
6407 {1UL << PG_swapbacked, "swapbacked" },
6408 {1UL << PG_unevictable, "unevictable" },
6409#ifdef CONFIG_MMU
6410 {1UL << PG_mlocked, "mlocked" },
6411#endif
6412#ifdef CONFIG_ARCH_USES_PG_UNCACHED
6413 {1UL << PG_uncached, "uncached" },
6414#endif
6415#ifdef CONFIG_MEMORY_FAILURE
6416 {1UL << PG_hwpoison, "hwpoison" },
be9cd873
GS
6417#endif
6418#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6419 {1UL << PG_compound_lock, "compound_lock" },
718a3821 6420#endif
718a3821
WF
6421};
6422
6423static void dump_page_flags(unsigned long flags)
6424{
6425 const char *delim = "";
6426 unsigned long mask;
6427 int i;
6428
51300cef 6429 BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
acc50c11 6430
718a3821
WF
6431 printk(KERN_ALERT "page flags: %#lx(", flags);
6432
6433 /* remove zone id */
6434 flags &= (1UL << NR_PAGEFLAGS) - 1;
6435
51300cef 6436 for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
718a3821
WF
6437
6438 mask = pageflag_names[i].mask;
6439 if ((flags & mask) != mask)
6440 continue;
6441
6442 flags &= ~mask;
6443 printk("%s%s", delim, pageflag_names[i].name);
6444 delim = "|";
6445 }
6446
6447 /* check for left over flags */
6448 if (flags)
6449 printk("%s%#lx", delim, flags);
6450
6451 printk(")\n");
6452}
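/*
 * Illustrative userspace sketch (not part of this file) of the decode
 * loop above, using a made-up two-entry name table.
 */
#include <stdio.h>

int main(void)
{
        struct { unsigned long mask; const char *name; } names[] = {
                { 1UL << 0, "locked" },
                { 1UL << 4, "dirty" },
        };
        unsigned long flags = (1UL << 0) | (1UL << 4);
        const char *delim = "";
        int i;

        printf("page flags: %#lx(", flags);
        for (i = 0; i < 2 && flags; i++) {
                if ((flags & names[i].mask) != names[i].mask)
                        continue;
                flags &= ~names[i].mask;
                printf("%s%s", delim, names[i].name);
                delim = "|";
        }
        if (flags)
                printf("%s%#lx", delim, flags);
        printf(")\n");          /* prints: page flags: 0x11(locked|dirty) */
        return 0;
}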
6453
6454void dump_page(struct page *page)
6455{
6456 printk(KERN_ALERT
6457 "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
4e9f64c4 6458 page, atomic_read(&page->_count), page_mapcount(page),
718a3821
WF
6459 page->mapping, page->index);
6460 dump_page_flags(page->flags);
f212ad7c 6461 mem_cgroup_print_bad_page(page);
718a3821 6462}