/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);        /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */
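
/*
 * For example (illustrative): while pm_restrict_gfp_mask() is in effect,
 * a GFP_KERNEL allocation (__GFP_WAIT | __GFP_IO | __GFP_FS) is narrowed
 * by the allocator's "gfp_mask &= gfp_allowed_mask" step to __GFP_WAIT
 * alone, i.e. it behaves like GFP_NOIO and cannot recurse into block I/O
 * or filesystem code while devices are suspended.
 */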

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
        256,
#endif
#ifdef CONFIG_ZONE_DMA32
        256,
#endif
#ifdef CONFIG_HIGHMEM
        32,
#endif
        32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
        "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
        "DMA32",
#endif
        "Normal",
#ifdef CONFIG_HIGHMEM
        "HighMem",
#endif
        "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * MAX_ACTIVE_REGIONS determines the maximum number of distinct
 * ranges of memory (RAM) that may be registered with add_active_range().
 * Ranges passed to add_active_range() will be merged if possible,
 * so the number of times add_active_range() can be called is
 * related to the number of nodes and the number of holes.
 */
#ifdef CONFIG_MAX_ACTIVE_REGIONS
/* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
#define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
#else
#if MAX_NUMNODES >= 32
/* If there can be many nodes, allow up to 50 holes per node */
#define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
#else
/* By default, allow up to 256 distinct regions */
#define MAX_ACTIVE_REGIONS 256
#endif
#endif

static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
static int __meminitdata nr_nodemap_entries;
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
        if (unlikely(page_group_by_mobility_disabled))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);

        do {
                seq = zone_span_seqbegin(zone);
                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
                        ret = 1;
                else if (pfn < zone->zone_start_pfn)
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /* Don't complain about poisoned pages */
        if (PageHWPoison(page)) {
                reset_page_mapcount(page); /* remove PageBuddy */
                return;
        }

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        printk(KERN_ALERT
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        dump_page(page);

        print_modules();
        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        reset_page_mapcount(page); /* remove PageBuddy */
        add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, free_compound_page);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                __SetPageTail(p);
                p->first_page = page;
        }
}
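
/*
 * For example (illustrative): after prep_compound_page(page, 2), the four
 * struct pages describe a single order-2 compound page:
 *
 *      page[0]    - PG_head set
 *      page[1..3] - PG_tail set, first_page pointing back at page[0]
 *
 * with the destructor (free_compound_page) and the order (2) recorded in
 * the first tail page's lru fields, per the layout described above.
 */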

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        int bad = 0;

        if (unlikely(compound_order(page) != order) ||
            unlikely(!PageHead(page))) {
                bad_page(page);
                bad++;
        }

        __ClearPageHead(page);

        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                if (unlikely(!PageTail(p) || (p->first_page != page))) {
                        bad_page(page);
                        bad++;
                }
                __ClearPageTail(p);
        }

        return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        /*
         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
         * and __GFP_HIGHMEM from hard or soft interrupt context.
         */
        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
        for (i = 0; i < (1 << order); i++)
                clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
        return page_idx ^ (1 << order);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                                int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * runs of free pages of length (1 << order), marked with _mapcount -2.
 * A page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long page_idx;
        unsigned long combined_idx;
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;

        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
                        return;

        VM_BUG_ON(migratetype == -1);

        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

        VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));

        while (order < MAX_ORDER-1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
                        break;

                /* Our buddy is free, merge with it and move up one order. */
                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
                rmv_page_order(buddy);
                combined_idx = buddy_idx & page_idx;
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
        set_page_order(page, order);

        /*
         * If this is not the largest possible page, check if the buddy
         * of the next-highest order is free.  If it is, it's possible
         * that pages are being freed that will coalesce soon.  In case
         * that is happening, add the free page to the tail of the list
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page.
         */
        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                struct page *higher_page, *higher_buddy;
                combined_idx = buddy_idx & page_idx;
                higher_page = page + (combined_idx - page_idx);
                buddy_idx = __find_buddy_index(combined_idx, order + 1);
                higher_buddy = page + (buddy_idx - combined_idx);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
        zone->free_area[order].nr_free++;
}
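
/*
 * A worked example of the merge loop above (illustrative): freeing an
 * order-0 page at pfn-relative index 12 (0b1100) when index 13 is already
 * free gives
 *
 *      buddy_idx    = 12 ^ (1 << 0) = 13   (free, so merge)
 *      combined_idx = 13 & 12       = 12   (order-1 block at 12)
 *      buddy_idx    = 12 ^ (1 << 1) = 14   (if 14..15 is a free order-1
 *                                           block, merge again into an
 *                                           order-2 block at 12, and so on)
 */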

/*
 * free_page_mlock() -- clean up attempts to free an mlocked page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
        __dec_zone_page_state(page, NR_MLOCK);
        __count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
                (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
{
        int migratetype = 0;
        int batch_free = 0;
        int to_free = count;

        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        while (to_free) {
                struct page *page;
                struct list_head *list;

                /*
                 * Remove pages from lists in a round-robin fashion. A
                 * batch_free count is maintained that is incremented when an
                 * empty list is encountered.  This is so more pages are freed
                 * off fuller lists instead of spinning excessively around empty
                 * lists.
                 */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));

                /* This is the only non-empty list. Free them all. */
                if (batch_free == MIGRATE_PCPTYPES)
                        batch_free = to_free;

                do {
                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
                } while (--to_free && --batch_free && !list_empty(list));
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
                                int migratetype)
{
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        __free_one_page(page, zone, order, migratetype);
        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
        int i;
        int bad = 0;

        trace_mm_page_free_direct(page, order);
        kmemcheck_free_shadow(page, order);

        if (PageAnon(page))
                page->mapping = NULL;
        for (i = 0; i < (1 << order); i++)
                bad += free_pages_check(page + i);
        if (bad)
                return false;

        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);

        return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, order))
                return;

        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
                                        get_pageblock_migratetype(page));
        local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
        if (order == 0) {
                __ClearPageReserved(page);
                set_page_count(page, 0);
                set_page_refcounted(page);
                __free_page(page);
        } else {
                int loop;

                prefetchw(page);
                for (loop = 0; loop < BITS_PER_LONG; loop++) {
                        struct page *p = &page[loop];

                        if (loop + 1 < BITS_PER_LONG)
                                prefetchw(p + 1);
                        __ClearPageReserved(p);
                        set_page_count(p, 0);
                }

                set_page_refcounted(page);
                __free_pages(page, order);
        }
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
        int migratetype)
{
        unsigned long size = 1 << high;

        while (high > low) {
                area--;
                high--;
                size >>= 1;
                VM_BUG_ON(bad_range(zone, &page[size]));
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
}
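
/*
 * For example (illustrative): satisfying an order-0 request (low = 0) from
 * an order-3 block (high = 3) at page puts the unused halves back on
 * successively smaller free lists:
 *
 *      page[4..7] -> order-2 free list
 *      page[2..3] -> order-1 free list
 *      page[1]    -> order-0 free list
 *
 * leaving page[0] as the allocated page.
 */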

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
                (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
        return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;
                if (unlikely(check_new_page(p)))
                        return 1;
        }

        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);

        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        unsigned int current_order;
        struct free_area *area;
        struct page *page;

        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
                if (list_empty(&area->free_list[migratetype]))
                        continue;

                page = list_entry(area->free_list[migratetype].next,
                                                        struct page, lru);
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }

        return NULL;
}

/*
 * This array describes the order in which the free lists are fallen back on
 * when the free lists for the desired migratetype are depleted.
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
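
/*
 * For example (illustrative): when a MIGRATE_UNMOVABLE request finds its
 * own free lists empty, __rmqueue_fallback() below tries MIGRATE_RECLAIMABLE
 * first and then MIGRATE_MOVABLE; MIGRATE_RESERVE is skipped there and is
 * only used by the final retry in __rmqueue().
 */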

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block().
 */
static int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype)
{
        struct page *page;
        unsigned long order;
        int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
        /*
         * page_zone is not safe to call in this context when
         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
         * anyway as we check zone boundaries in move_freepages_block().
         * Remove at a later date when no bug reports exist related to
         * grouping pages by mobility
         */
        BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

        for (page = start_page; page <= end_page;) {
                /* Make sure we are not inadvertently changing nodes */
                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }

                if (!PageBuddy(page)) {
                        page++;
                        continue;
                }

                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }

        return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype)
{
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;

        start_pfn = page_to_pfn(page);
        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
        start_page = pfn_to_page(start_pfn);
        end_page = start_page + pageblock_nr_pages - 1;
        end_pfn = start_pfn + pageblock_nr_pages - 1;

        /* Do not cross zone boundaries */
        if (start_pfn < zone->zone_start_pfn)
                start_page = page;
        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
                return 0;

        return move_freepages(zone, start_page, end_page, migratetype);
}
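
/*
 * For example (illustrative, assuming pageblock_nr_pages == 512): a page
 * at pfn 1000 rounds down to start_pfn = 1000 & ~511 = 512, so the whole
 * pageblock spanning pfns 512..1023 is moved to the new migratetype's
 * free lists.
 */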

static void change_pageblock_range(struct page *pageblock_page,
                                        int start_order, int migratetype)
{
        int nr_pageblocks = 1 << (start_order - pageblock_order);

        while (nr_pageblocks--) {
                set_pageblock_migratetype(pageblock_page, migratetype);
                pageblock_page += pageblock_nr_pages;
        }
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
        struct free_area *area;
        int current_order;
        struct page *page;
        int migratetype, i;

        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
                                                --current_order) {
                for (i = 0; i < MIGRATE_TYPES - 1; i++) {
                        migratetype = fallbacks[start_migratetype][i];

                        /* MIGRATE_RESERVE handled later if necessary */
                        if (migratetype == MIGRATE_RESERVE)
                                continue;

                        area = &(zone->free_area[current_order]);
                        if (list_empty(&area->free_list[migratetype]))
                                continue;

                        page = list_entry(area->free_list[migratetype].next,
                                        struct page, lru);
                        area->nr_free--;

                        /*
                         * If breaking a large block of pages, move all free
                         * pages to the preferred allocation list. If falling
                         * back for a reclaimable kernel allocation, be more
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
                                        start_migratetype == MIGRATE_RECLAIMABLE ||
                                        page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);

                                /* Claim the whole block if over half of it is free */
                                if (pages >= (1 << (pageblock_order-1)) ||
                                                page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);

                                migratetype = start_migratetype;
                        }

                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);

                        /* Take ownership for orders >= pageblock_order */
                        if (current_order >= pageblock_order)
                                change_pageblock_range(page, current_order,
                                                        start_migratetype);

                        expand(zone, page, order, current_order, area, migratetype);

                        trace_mm_page_alloc_extfrag(page, order, current_order,
                                start_migratetype, migratetype);

                        return page;
                }
        }

        return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        struct page *page;

retry_reserve:
        page = __rmqueue_smallest(zone, order, migratetype);

        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                page = __rmqueue_fallback(zone, order, migratetype);

                /*
                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
                 * is used because __rmqueue_smallest is an inline function
                 * and we want just one call site
                 */
                if (!page) {
                        migratetype = MIGRATE_RESERVE;
                        goto retry_reserve;
                }
        }

        trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, int cold)
{
        int i;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;

                /*
                 * Split buddy pages returned by expand() are received here
                 * in physical page order. The page is added to the caller's
                 * list and the list head then moves forward. From the caller's
                 * perspective, the linked list is ordered by page number in
                 * some conditions. This is useful for IO devices that can
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
                if (likely(cold == 0))
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
        unsigned long flags;
        int to_drain;

        local_irq_save(flags);
        if (pcp->count >= pcp->batch)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
        free_pcppages_bulk(zone, to_drain, pcp);
        pcp->count -= to_drain;
        local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
        unsigned long flags;
        struct zone *zone;

        for_each_populated_zone(zone) {
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;

                local_irq_save(flags);
                pset = per_cpu_ptr(zone->pageset, cpu);

                pcp = &pset->pcp;
                if (pcp->count) {
                        free_pcppages_bulk(zone, pcp->count, pcp);
                        pcp->count = 0;
                }
                local_irq_restore(flags);
        }
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
        drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
        on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
        unsigned long pfn, max_zone_pfn;
        unsigned long flags;
        int order, t;
        struct list_head *curr;

        if (!zone->spanned_pages)
                return;

        spin_lock_irqsave(&zone->lock, flags);

        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);

                        if (!swsusp_page_is_forbidden(page))
                                swsusp_unset_page_free(page);
                }

        for_each_migratetype_order(order, t) {
                list_for_each(curr, &zone->free_area[order].free_list[t]) {
                        unsigned long i;

                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
                        for (i = 0; i < (1UL << order); i++)
                                swsusp_set_page_free(pfn_to_page(pfn + i));
                }
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, 0))
                return;

        migratetype = get_pageblock_migratetype(page);
        set_page_private(page, migratetype);
        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_event(PGFREE);

        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
         * Free ISOLATE pages back to the allocator because they are being
         * offlined, but treat RESERVE as movable pages so we can get those
         * areas back if necessary.  Otherwise, we may have to free
         * excessively into the page allocator.
         */
        if (migratetype >= MIGRATE_PCPTYPES) {
                if (unlikely(migratetype == MIGRATE_ISOLATE)) {
                        free_one_page(zone, page, 0, migratetype);
                        goto out;
                }
                migratetype = MIGRATE_MOVABLE;
        }

        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        if (cold)
                list_add_tail(&page->lru, &pcp->lists[migratetype]);
        else
                list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pcppages_bulk(zone, pcp->batch, pcp);
                pcp->count -= pcp->batch;
        }

out:
        local_irq_restore(flags);
}
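
/*
 * For example (illustrative, with the typical tuning pcp->high == 186 and
 * pcp->batch == 31): once a free pushes the per-cpu count to 186, one
 * batch of 31 pages is returned to the buddy lists and the count drops to
 * 155; hot/cold ordering is preserved because hot frees go to the list
 * head and cold frees to the tail.
 */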

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1 << order) order-0 sub-pages: page[0] .. page[n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
        int i;

        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
        /*
         * Split shadow pages too, because free(page[0]) would
         * otherwise free the whole shadow.
         */
        if (kmemcheck_page_is_tracked(page))
                split_page(virt_to_page(page[0].shadow), order);
#endif

        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
}
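
/*
 * A minimal usage sketch (illustrative): alloc_pages(GFP_KERNEL, 2)
 * returns one order-2 (4-page) allocation; after split_page(p, 2) each
 * of p, p + 1, p + 2 and p + 3 is an independent order-0 page that can
 * be released individually with __free_page().
 */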

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
        unsigned int order;
        unsigned long watermark;
        struct zone *zone;

        BUG_ON(!PageBuddy(page));

        zone = page_zone(page);
        order = page_order(page);

        /* Obey watermarks as if the page was being allocated */
        watermark = low_wmark_pages(zone) + (1 << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return 0;

        /* Remove page from free list */
        list_del(&page->lru);
        zone->free_area[order].nr_free--;
        rmv_page_order(page);
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));

        /* Split into individual pages */
        set_page_refcounted(page);
        split_page(page, order);

        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
                for (; page < endpage; page += pageblock_nr_pages)
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
        }

        return 1 << order;
}
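
/*
 * For example (illustrative): if a caller such as memory compaction
 * isolates a free order-3 block, split_free_page() first checks
 * low_wmark_pages(zone) + 8 before taking it off the free lists, then
 * hands back eight refcounted order-0 pages (return value 8) for use as
 * migration targets.
 */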

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
                        struct zone *zone, int order, gfp_t gfp_flags,
                        int migratetype)
{
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);

again:
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
                struct list_head *list;

                local_irq_save(flags);
                pcp = &this_cpu_ptr(zone->pageset)->pcp;
                list = &pcp->lists[migratetype];
                if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                        pcp->batch, list,
                                        migratetype, cold);
                        if (unlikely(list_empty(list)))
                                goto failed;
                }

                if (cold)
                        page = list_entry(list->prev, struct page, lru);
                else
                        page = list_entry(list->next, struct page, lru);

                list_del(&page->lru);
                pcp->count--;
        } else {
                if (unlikely(gfp_flags & __GFP_NOFAIL)) {
                        /*
                         * __GFP_NOFAIL is not to be used in new code.
                         *
                         * All __GFP_NOFAIL callers should be fixed so that they
                         * properly detect and handle allocation failures.
                         *
                         * We most definitely don't want callers attempting to
                         * allocate greater than order-1 page units with
                         * __GFP_NOFAIL.
                         */
                        WARN_ON_ONCE(order > 1);
                }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
                __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
        }

        __count_zone_vm_events(PGALLOC, zone, 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
        local_irq_restore(flags);

        VM_BUG_ON(bad_range(zone, page));
        if (prep_new_page(page, order, gfp_flags))
                goto again;
        return page;

failed:
        local_irq_restore(flags);
        return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER            0x10 /* try to alloc harder */
#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
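
/*
 * For example (illustrative): WMARK_MIN, WMARK_LOW and WMARK_HIGH are the
 * enum values 0, 1 and 2, so "alloc_flags & ALLOC_WMARK_MASK" (mask 0x03)
 * extracts the watermark index while the 0x10/0x20/0x40 bits travel in
 * the same int, e.g.
 * (ALLOC_WMARK_LOW | ALLOC_CPUSET) & ALLOC_WMARK_MASK == WMARK_LOW.
 */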

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
        struct fault_attr attr;

        u32 ignore_gfp_highmem;
        u32 ignore_gfp_wait;
        u32 min_order;
} fail_page_alloc = {
        .attr = FAULT_ATTR_INITIALIZER,
        .ignore_gfp_wait = 1,
        .ignore_gfp_highmem = 1,
        .min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
        return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
        if (order < fail_page_alloc.min_order)
                return 0;
        if (gfp_mask & __GFP_NOFAIL)
                return 0;
        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
                return 0;
        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
                return 0;

        return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
        mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
        struct dentry *dir;

        dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
                                        &fail_page_alloc.attr);
        if (IS_ERR(dir))
                return PTR_ERR(dir);

        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
                                &fail_page_alloc.ignore_gfp_wait))
                goto fail;
        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
                                &fail_page_alloc.ignore_gfp_highmem))
                goto fail;
        if (!debugfs_create_u32("min-order", mode, dir,
                                &fail_page_alloc.min_order))
                goto fail;

        return 0;
fail:
        debugfs_remove_recursive(dir);

        return -ENOMEM;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
        return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return true if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags, long free_pages)
{
        /* free_pages may go negative - that's OK */
        long min = mark;
        int o;

        free_pages -= (1 << order) + 1;
        if (alloc_flags & ALLOC_HIGH)
                min -= min / 2;
        if (alloc_flags & ALLOC_HARDER)
                min -= min / 4;

        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
                return false;
        for (o = 0; o < order; o++) {
                /* At the next order, this order's pages become unavailable */
                free_pages -= z->free_area[o].nr_free << o;

                /* Require fewer higher order pages to be free */
                min >>= 1;

                if (free_pages <= min)
                        return false;
        }
        return true;
}
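
/*
 * A worked example of the loop above (illustrative): for an order-2
 * request with mark = 128, no lowmem reserve and free_pages = 517, the
 * initial check uses 517 - 5 = 512 > 128.  If 200 of those pages sit on
 * the order-0 lists and 120 (60 blocks) on the order-1 lists, the loop
 * then checks 512 - 200 = 312 > 64 and 312 - 120 = 192 > 32, so the
 * watermark is met and an order-2 page may be allocated.
 */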

bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
{
        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                                        zone_page_state(z, NR_FREE_PAGES));
}

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
{
        long free_pages = zone_page_state(z, NR_FREE_PAGES);

        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                                                                free_pages);
}

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        nodemask_t *allowednodes;       /* zonelist_cache approximation */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return NULL;

        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
                zlc->last_full_zap = jiffies;
        }

        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
                                        &cpuset_current_mems_allowed :
                                        &node_states[N_HIGH_MEMORY];
        return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zones node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                                nodemask_t *allowednodes)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        int i;                          /* index of *z in zonelist zones */
        int n;                          /* node that zone *z is on */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return 1;

        i = z - zonelist->_zonerefs;
        n = zlc->z_to_n[i];

        /* This zone is worth trying if it is allowed but not full */
        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        int i;                          /* index of *z in zonelist zones */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return;

        i = z - zonelist->_zonerefs;

        set_bit(i, zlc->fullzones);
}

/*
 * clear all zones full, called after direct reclaim makes progress so that
 * a zone that was recently full is not skipped over for up to a second
 */
static void zlc_clear_zones_full(struct zonelist *zonelist)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return;

        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
}

#else   /* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
        return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                nodemask_t *allowednodes)
{
        return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}

static void zlc_clear_zones_full(struct zonelist *zonelist)
{
}
#endif  /* CONFIG_NUMA */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
                struct zone *preferred_zone, int migratetype)
{
        struct zoneref *z;
        struct page *page = NULL;
        int classzone_idx;
        struct zone *zone;
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */

        classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free pages.
         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
         */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                high_zoneidx, nodemask) {
                if (NUMA_BUILD && zlc_active &&
                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
                                continue;
                if ((alloc_flags & ALLOC_CPUSET) &&
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                continue;

                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
                        int ret;

                        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
                        if (zone_watermark_ok(zone, order, mark,
                                    classzone_idx, alloc_flags))
                                goto try_this_zone;

                        if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
                                /*
                                 * we do zlc_setup if there are multiple nodes
                                 * and before considering the first zone allowed
                                 * by the cpuset.
                                 */
                                allowednodes = zlc_setup(zonelist, alloc_flags);
                                zlc_active = 1;
                                did_zlc_setup = 1;
                        }

                        if (zone_reclaim_mode == 0)
                                goto this_zone_full;

                        /*
                         * As we may have just activated ZLC, check if the first
                         * eligible zone has failed zone_reclaim recently.
                         */
                        if (NUMA_BUILD && zlc_active &&
                                !zlc_zone_worth_trying(zonelist, z, allowednodes))
                                continue;

                        ret = zone_reclaim(zone, gfp_mask, order);
                        switch (ret) {
                        case ZONE_RECLAIM_NOSCAN:
                                /* did not scan */
                                continue;
                        case ZONE_RECLAIM_FULL:
                                /* scanned but unreclaimable */
                                continue;
                        default:
                                /* did we reclaim enough? */
                                if (!zone_watermark_ok(zone, order, mark,
                                                classzone_idx, alloc_flags))
                                        goto this_zone_full;
                        }
                }

try_this_zone:
                page = buffered_rmqueue(preferred_zone, zone, order,
                                                gfp_mask, migratetype);
                if (page)
                        break;
this_zone_full:
                if (NUMA_BUILD)
                        zlc_mark_zone_full(zonelist, z);
        }

        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
                /* Disable zlc cache for second zonelist scan */
                zlc_active = 0;
                goto zonelist_scan;
        }
        return page;
}

/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
static inline bool should_suppress_show_mem(void)
{
        bool ret = false;

#if NODES_SHIFT > 8
        ret = in_interrupt();
#endif
        return ret;
}

static DEFINE_RATELIMIT_STATE(nopage_rs,
                DEFAULT_RATELIMIT_INTERVAL,
                DEFAULT_RATELIMIT_BURST);

void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
{
        unsigned int filter = SHOW_MEM_FILTER_NODES;

        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
                return;

        /*
         * This documents exceptions given to allocations in certain
         * contexts that are allowed to allocate outside current's set
         * of allowed nodes.
         */
        if (!(gfp_mask & __GFP_NOMEMALLOC))
                if (test_thread_flag(TIF_MEMDIE) ||
                    (current->flags & (PF_MEMALLOC | PF_EXITING)))
                        filter &= ~SHOW_MEM_FILTER_NODES;
        if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
                filter &= ~SHOW_MEM_FILTER_NODES;

        if (fmt) {
                struct va_format vaf;
                va_list args;

                va_start(args, fmt);

                vaf.fmt = fmt;
                vaf.va = &args;

                pr_warn("%pV", &vaf);

                va_end(args);
        }

        pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
                current->comm, order, gfp_mask);

        dump_stack();
        if (!should_suppress_show_mem())
                show_mem(filter);
}

static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
                                unsigned long pages_reclaimed)
{
        /* Do not loop if specifically requested */
        if (gfp_mask & __GFP_NORETRY)
                return 0;

        /*
         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
         * means __GFP_NOFAIL, but that may not be true in other
         * implementations.
         */
        if (order <= PAGE_ALLOC_COSTLY_ORDER)
                return 1;

        /*
         * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
         * specified, then we retry until we no longer reclaim any pages
         * (above), or we've reclaimed an order of pages at least as
         * large as the allocation's order. In both cases, if the
         * allocation still fails, we stop retrying.
         */
        if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
                return 1;

        /*
         * Don't let big-order allocations loop unless the caller
         * explicitly requests that.
         */
        if (gfp_mask & __GFP_NOFAIL)
                return 1;

        return 0;
}
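
/*
 * For example (illustrative, with PAGE_ALLOC_COSTLY_ORDER == 3): a failed
 * order-2 request always retries, while a failed order-4 __GFP_REPEAT
 * request keeps retrying only until at least 16 pages have been reclaimed
 * on its behalf; without __GFP_REPEAT or __GFP_NOFAIL it gives up at once.
 */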

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
        int migratetype)
{
        struct page *page;

        /* Acquire the OOM killer lock for the zones in zonelist */
        if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
                schedule_timeout_uninterruptible(1);
                return NULL;
        }

        /*
         * Go through the zonelist yet one more time, keep very high watermark
         * here, this is only to catch a parallel oom killing, we must fail if
         * we're still under heavy pressure.
         */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
                order, zonelist, high_zoneidx,
                ALLOC_WMARK_HIGH|ALLOC_CPUSET,
                preferred_zone, migratetype);
        if (page)
                goto out;

        if (!(gfp_mask & __GFP_NOFAIL)) {
                /* The OOM killer will not help higher order allocs */
                if (order > PAGE_ALLOC_COSTLY_ORDER)
                        goto out;
                /* The OOM killer does not needlessly kill tasks for lowmem */
                if (high_zoneidx < ZONE_NORMAL)
                        goto out;
                /*
                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
                 * The caller should handle page allocation failure by itself if
                 * it specifies __GFP_THISNODE.
                 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
                 */
                if (gfp_mask & __GFP_THISNODE)
                        goto out;
        }
        /* Exhausted what can be done so it's blamo time */
        out_of_memory(zonelist, gfp_mask, order, nodemask);

out:
        clear_zonelist_oom(zonelist, gfp_mask);
        return page;
}
1882
1883 #ifdef CONFIG_COMPACTION
1884 /* Try memory compaction for high-order allocations before reclaim */
1885 static struct page *
1886 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1887 struct zonelist *zonelist, enum zone_type high_zoneidx,
1888 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1889 int migratetype, unsigned long *did_some_progress,
1890 bool sync_migration)
1891 {
1892 struct page *page;
1893
1894 if (!order || compaction_deferred(preferred_zone))
1895 return NULL;
1896
1897 current->flags |= PF_MEMALLOC;
1898 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1899 nodemask, sync_migration);
1900 current->flags &= ~PF_MEMALLOC;
1901 if (*did_some_progress != COMPACT_SKIPPED) {
1902
1903 /* Page migration frees to the PCP lists but we want merging */
1904 drain_pages(get_cpu());
1905 put_cpu();
1906
1907 page = get_page_from_freelist(gfp_mask, nodemask,
1908 order, zonelist, high_zoneidx,
1909 alloc_flags, preferred_zone,
1910 migratetype);
1911 if (page) {
1912 preferred_zone->compact_considered = 0;
1913 preferred_zone->compact_defer_shift = 0;
1914 count_vm_event(COMPACTSUCCESS);
1915 return page;
1916 }
1917
1918 /*
1919 * It's bad if a compaction run occurs and fails.
1920 * The most likely reason is that pages exist,
1921 * but not enough to satisfy watermarks.
1922 */
1923 count_vm_event(COMPACTFAIL);
1924 defer_compaction(preferred_zone);
1925
1926 cond_resched();
1927 }
1928
1929 return NULL;
1930 }
1931 #else
1932 static inline struct page *
1933 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1934 struct zonelist *zonelist, enum zone_type high_zoneidx,
1935 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1936 int migratetype, unsigned long *did_some_progress,
1937 bool sync_migration)
1938 {
1939 return NULL;
1940 }
1941 #endif /* CONFIG_COMPACTION */
1942
1943 /* The really slow allocator path where we enter direct reclaim */
1944 static inline struct page *
1945 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1946 struct zonelist *zonelist, enum zone_type high_zoneidx,
1947 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1948 int migratetype, unsigned long *did_some_progress)
1949 {
1950 struct page *page = NULL;
1951 struct reclaim_state reclaim_state;
1952 bool drained = false;
1953
1954 cond_resched();
1955
1956 /* We now go into synchronous reclaim */
1957 cpuset_memory_pressure_bump();
1958 current->flags |= PF_MEMALLOC;
1959 lockdep_set_current_reclaim_state(gfp_mask);
1960 reclaim_state.reclaimed_slab = 0;
1961 current->reclaim_state = &reclaim_state;
1962
1963 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1964
1965 current->reclaim_state = NULL;
1966 lockdep_clear_current_reclaim_state();
1967 current->flags &= ~PF_MEMALLOC;
1968
1969 cond_resched();
1970
1971 if (unlikely(!(*did_some_progress)))
1972 return NULL;
1973
1974 /* After successful reclaim, reconsider all zones for allocation */
1975 if (NUMA_BUILD)
1976 zlc_clear_zones_full(zonelist);
1977
1978 retry:
1979 page = get_page_from_freelist(gfp_mask, nodemask, order,
1980 zonelist, high_zoneidx,
1981 alloc_flags, preferred_zone,
1982 migratetype);
1983
1984 /*
1985 * If an allocation failed after direct reclaim, it could be because
1986 * pages are pinned on the per-cpu lists. Drain them and try again
1987 */
1988 if (!page && !drained) {
1989 drain_all_pages();
1990 drained = true;
1991 goto retry;
1992 }
1993
1994 return page;
1995 }
1996
1997 /*
1998 * This is called in the allocator slow-path if the allocation request is of
1999 * sufficient urgency to ignore watermarks and take other desperate measures
2000 */
2001 static inline struct page *
2002 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2003 struct zonelist *zonelist, enum zone_type high_zoneidx,
2004 nodemask_t *nodemask, struct zone *preferred_zone,
2005 int migratetype)
2006 {
2007 struct page *page;
2008
2009 do {
2010 page = get_page_from_freelist(gfp_mask, nodemask, order,
2011 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2012 preferred_zone, migratetype);
2013
2014 if (!page && (gfp_mask & __GFP_NOFAIL))
2015 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2016 } while (!page && (gfp_mask & __GFP_NOFAIL));
2017
2018 return page;
2019 }
2020
2021 static inline
2022 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2023 enum zone_type high_zoneidx,
2024 enum zone_type classzone_idx)
2025 {
2026 struct zoneref *z;
2027 struct zone *zone;
2028
2029 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2030 wakeup_kswapd(zone, order, classzone_idx);
2031 }
2032
2033 static inline int
2034 gfp_to_alloc_flags(gfp_t gfp_mask)
2035 {
2036 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2037 const gfp_t wait = gfp_mask & __GFP_WAIT;
2038
2039 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2040 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2041
2042 /*
2043 * The caller may dip into page reserves a bit more if the caller
2044 * cannot run direct reclaim, or if the caller has realtime scheduling
2045 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
2046 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2047 */
2048 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2049
2050 if (!wait) {
2051 /*
2052 * Not worth trying to allocate harder for
2053 * __GFP_NOMEMALLOC even if it can't schedule.
2054 */
2055 if (!(gfp_mask & __GFP_NOMEMALLOC))
2056 alloc_flags |= ALLOC_HARDER;
2057 /*
2058 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2059 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2060 */
2061 alloc_flags &= ~ALLOC_CPUSET;
2062 } else if (unlikely(rt_task(current)) && !in_interrupt())
2063 alloc_flags |= ALLOC_HARDER;
2064
2065 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2066 if (!in_interrupt() &&
2067 ((current->flags & PF_MEMALLOC) ||
2068 unlikely(test_thread_flag(TIF_MEMDIE))))
2069 alloc_flags |= ALLOC_NO_WATERMARKS;
2070 }
2071
2072 return alloc_flags;
2073 }
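/*
 * Tracing one case through gfp_to_alloc_flags() (a sketch, assuming
 * GFP_ATOMIC == __GFP_HIGH with __GFP_WAIT clear, as in this kernel):
 *
 *	int flags = gfp_to_alloc_flags(GFP_ATOMIC);
 *
 * starts from ALLOC_WMARK_MIN | ALLOC_CPUSET, ORs in ALLOC_HIGH because
 * __GFP_HIGH is set, adds ALLOC_HARDER because the caller cannot wait and
 * did not pass __GFP_NOMEMALLOC, and clears ALLOC_CPUSET, yielding
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 */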
2074
2075 static inline struct page *
2076 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2077 struct zonelist *zonelist, enum zone_type high_zoneidx,
2078 nodemask_t *nodemask, struct zone *preferred_zone,
2079 int migratetype)
2080 {
2081 const gfp_t wait = gfp_mask & __GFP_WAIT;
2082 struct page *page = NULL;
2083 int alloc_flags;
2084 unsigned long pages_reclaimed = 0;
2085 unsigned long did_some_progress;
2086 bool sync_migration = false;
2087
2088 /*
2089 * In the slowpath, we sanity check order to avoid ever trying to
2090 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2091 * be using allocators in order of preference for an area that is
2092 * too large.
2093 */
2094 if (order >= MAX_ORDER) {
2095 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2096 return NULL;
2097 }
2098
2099 /*
2100 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2101 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2102 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
2103 * using a larger set of nodes after it has established that the
2104 * allowed per node queues are empty and that nodes are
2105 * over allocated.
2106 */
2107 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2108 goto nopage;
2109
2110 restart:
2111 if (!(gfp_mask & __GFP_NO_KSWAPD))
2112 wake_all_kswapd(order, zonelist, high_zoneidx,
2113 zone_idx(preferred_zone));
2114
2115 /*
2116 * OK, we're below the kswapd watermark and have kicked background
2117 * reclaim. Now things get more complex, so set up alloc_flags according
2118 * to how we want to proceed.
2119 */
2120 alloc_flags = gfp_to_alloc_flags(gfp_mask);
2121
2122 /*
2123 * Find the true preferred zone if the allocation is unconstrained by
2124 * cpusets.
2125 */
2126 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2127 first_zones_zonelist(zonelist, high_zoneidx, NULL,
2128 &preferred_zone);
2129
2130 rebalance:
2131 /* This is the last chance, in general, before the goto nopage. */
2132 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2133 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2134 preferred_zone, migratetype);
2135 if (page)
2136 goto got_pg;
2137
2138 /* Allocate without watermarks if the context allows */
2139 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2140 page = __alloc_pages_high_priority(gfp_mask, order,
2141 zonelist, high_zoneidx, nodemask,
2142 preferred_zone, migratetype);
2143 if (page)
2144 goto got_pg;
2145 }
2146
2147 /* Atomic allocations - we can't balance anything */
2148 if (!wait)
2149 goto nopage;
2150
2151 /* Avoid recursion of direct reclaim */
2152 if (current->flags & PF_MEMALLOC)
2153 goto nopage;
2154
2155 /* Avoid allocations with no watermarks from looping endlessly */
2156 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2157 goto nopage;
2158
2159 /*
2160 * Try direct compaction. The first pass is asynchronous. Subsequent
2161 * attempts after direct reclaim are synchronous
2162 */
2163 page = __alloc_pages_direct_compact(gfp_mask, order,
2164 zonelist, high_zoneidx,
2165 nodemask,
2166 alloc_flags, preferred_zone,
2167 migratetype, &did_some_progress,
2168 sync_migration);
2169 if (page)
2170 goto got_pg;
2171 sync_migration = true;
2172
2173 /* Try direct reclaim and then allocating */
2174 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2175 zonelist, high_zoneidx,
2176 nodemask,
2177 alloc_flags, preferred_zone,
2178 migratetype, &did_some_progress);
2179 if (page)
2180 goto got_pg;
2181
2182 /*
2183 * If we failed to make any progress reclaiming, then we are
2184 * running out of options and have to consider going OOM
2185 */
2186 if (!did_some_progress) {
2187 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2188 if (oom_killer_disabled)
2189 goto nopage;
2190 page = __alloc_pages_may_oom(gfp_mask, order,
2191 zonelist, high_zoneidx,
2192 nodemask, preferred_zone,
2193 migratetype);
2194 if (page)
2195 goto got_pg;
2196
2197 if (!(gfp_mask & __GFP_NOFAIL)) {
2198 /*
2199 * The oom killer is not called for high-order
2200 * allocations that may fail, so if no progress
2201 * is being made, there are no other options and
2202 * retrying is unlikely to help.
2203 */
2204 if (order > PAGE_ALLOC_COSTLY_ORDER)
2205 goto nopage;
2206 /*
2207 * The oom killer is not called for lowmem
2208 * allocations to prevent needlessly killing
2209 * innocent tasks.
2210 */
2211 if (high_zoneidx < ZONE_NORMAL)
2212 goto nopage;
2213 }
2214
2215 goto restart;
2216 }
2217 }
2218
2219 /* Check if we should retry the allocation */
2220 pages_reclaimed += did_some_progress;
2221 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2222 /* Wait for some write requests to complete then retry */
2223 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2224 goto rebalance;
2225 } else {
2226 /*
2227 * High-order allocations do not necessarily loop after direct
2228 * reclaim, and reclaim/compaction depends on compaction being
2229 * called after reclaim, so call it directly if necessary.
2230 */
2231 page = __alloc_pages_direct_compact(gfp_mask, order,
2232 zonelist, high_zoneidx,
2233 nodemask,
2234 alloc_flags, preferred_zone,
2235 migratetype, &did_some_progress,
2236 sync_migration);
2237 if (page)
2238 goto got_pg;
2239 }
2240
2241 nopage:
2242 warn_alloc_failed(gfp_mask, order, NULL);
2243 return page;
2244 got_pg:
2245 if (kmemcheck_enabled)
2246 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2247 return page;
2248
2249 }
2250
2251 /*
2252 * This is the 'heart' of the zoned buddy allocator.
2253 */
2254 struct page *
2255 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2256 struct zonelist *zonelist, nodemask_t *nodemask)
2257 {
2258 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2259 struct zone *preferred_zone;
2260 struct page *page;
2261 int migratetype = allocflags_to_migratetype(gfp_mask);
2262
2263 gfp_mask &= gfp_allowed_mask;
2264
2265 lockdep_trace_alloc(gfp_mask);
2266
2267 might_sleep_if(gfp_mask & __GFP_WAIT);
2268
2269 if (should_fail_alloc_page(gfp_mask, order))
2270 return NULL;
2271
2272 /*
2273 * Check the zones suitable for the gfp_mask contain at least one
2274 * valid zone. It's possible to have an empty zonelist as a result
2275 * of GFP_THISNODE and a memoryless node
2276 */
2277 if (unlikely(!zonelist->_zonerefs->zone))
2278 return NULL;
2279
2280 get_mems_allowed();
2281 /* The preferred zone is used for statistics later */
2282 first_zones_zonelist(zonelist, high_zoneidx,
2283 nodemask ? : &cpuset_current_mems_allowed,
2284 &preferred_zone);
2285 if (!preferred_zone) {
2286 put_mems_allowed();
2287 return NULL;
2288 }
2289
2290 /* First allocation attempt */
2291 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2292 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2293 preferred_zone, migratetype);
2294 if (unlikely(!page))
2295 page = __alloc_pages_slowpath(gfp_mask, order,
2296 zonelist, high_zoneidx, nodemask,
2297 preferred_zone, migratetype);
2298 put_mems_allowed();
2299
2300 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2301 return page;
2302 }
2303 EXPORT_SYMBOL(__alloc_pages_nodemask);
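/*
 * Usage sketch (the caller is hypothetical): most code reaches this
 * function through the alloc_pages()/alloc_pages_node() wrappers, which
 * supply the current node's zonelist and a NULL nodemask:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page)
 *		__free_pages(page, 2);
 *
 * The example requests an order-2 block, i.e. 4 contiguous pages.
 */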
2304
2305 /*
2306 * Common helper functions.
2307 */
2308 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2309 {
2310 struct page *page;
2311
2312 /*
2313 * __get_free_pages() returns a directly mapped kernel virtual address,
2314 * which cannot represent a highmem page.
2315 */
2316 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2317
2318 page = alloc_pages(gfp_mask, order);
2319 if (!page)
2320 return 0;
2321 return (unsigned long) page_address(page);
2322 }
2323 EXPORT_SYMBOL(__get_free_pages);
2324
2325 unsigned long get_zeroed_page(gfp_t gfp_mask)
2326 {
2327 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2328 }
2329 EXPORT_SYMBOL(get_zeroed_page);
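/*
 * Usage sketch (hypothetical caller): the address-based helpers pair with
 * free_page()/free_pages(), while struct page allocations pair with
 * __free_pages():
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (addr)
 *		free_page(addr);
 */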
2330
2331 void __pagevec_free(struct pagevec *pvec)
2332 {
2333 int i = pagevec_count(pvec);
2334
2335 while (--i >= 0) {
2336 trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2337 free_hot_cold_page(pvec->pages[i], pvec->cold);
2338 }
2339 }
2340
2341 void __free_pages(struct page *page, unsigned int order)
2342 {
2343 if (put_page_testzero(page)) {
2344 if (order == 0)
2345 free_hot_cold_page(page, 0);
2346 else
2347 __free_pages_ok(page, order);
2348 }
2349 }
2350
2351 EXPORT_SYMBOL(__free_pages);
2352
2353 void free_pages(unsigned long addr, unsigned int order)
2354 {
2355 if (addr != 0) {
2356 VM_BUG_ON(!virt_addr_valid((void *)addr));
2357 __free_pages(virt_to_page((void *)addr), order);
2358 }
2359 }
2360
2361 EXPORT_SYMBOL(free_pages);
2362
2363 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2364 {
2365 if (addr) {
2366 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2367 unsigned long used = addr + PAGE_ALIGN(size);
2368
2369 split_page(virt_to_page((void *)addr), order);
2370 while (used < alloc_end) {
2371 free_page(used);
2372 used += PAGE_SIZE;
2373 }
2374 }
2375 return (void *)addr;
2376 }
2377
2378 /**
2379 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2380 * @size: the number of bytes to allocate
2381 * @gfp_mask: GFP flags for the allocation
2382 *
2383 * This function is similar to alloc_pages(), except that it allocates the
2384 * minimum number of pages to satisfy the request. alloc_pages() can only
2385 * allocate memory in power-of-two pages.
2386 *
2387 * This function is also limited by MAX_ORDER.
2388 *
2389 * Memory allocated by this function must be released by free_pages_exact().
2390 */
2391 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2392 {
2393 unsigned int order = get_order(size);
2394 unsigned long addr;
2395
2396 addr = __get_free_pages(gfp_mask, order);
2397 return make_alloc_exact(addr, order, size);
2398 }
2399 EXPORT_SYMBOL(alloc_pages_exact);
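/*
 * Worked example (sizes hypothetical, 4K pages assumed): a request for
 * 5 * PAGE_SIZE + 1 bytes is page-aligned up to 6 pages; get_order()
 * selects an order-3 (8-page) block, and make_alloc_exact() splits it
 * and frees the 2 unused tail pages:
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE + 1, GFP_KERNEL);
 *	if (buf)
 *		free_pages_exact(buf, 5 * PAGE_SIZE + 1);
 */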
2400
2401 /**
2402 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2403 * pages on a node.
2404 * @nid: the preferred node ID where memory should be allocated
2405 * @size: the number of bytes to allocate
2406 * @gfp_mask: GFP flags for the allocation
2407 *
2408 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
2409 * back.
2410 * Note this is not alloc_pages_exact_node(), which allocates on a specific
2411 * node but is not exact.
2412 */
2413 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2414 {
2415 unsigned order = get_order(size);
2416 struct page *p = alloc_pages_node(nid, gfp_mask, order);
2417 if (!p)
2418 return NULL;
2419 return make_alloc_exact((unsigned long)page_address(p), order, size);
2420 }
2421 EXPORT_SYMBOL(alloc_pages_exact_nid);
2422
2423 /**
2424 * free_pages_exact - release memory allocated via alloc_pages_exact()
2425 * @virt: the value returned by alloc_pages_exact.
2426 * @size: size of allocation, same value as passed to alloc_pages_exact().
2427 *
2428 * Release the memory allocated by a previous call to alloc_pages_exact.
2429 */
2430 void free_pages_exact(void *virt, size_t size)
2431 {
2432 unsigned long addr = (unsigned long)virt;
2433 unsigned long end = addr + PAGE_ALIGN(size);
2434
2435 while (addr < end) {
2436 free_page(addr);
2437 addr += PAGE_SIZE;
2438 }
2439 }
2440 EXPORT_SYMBOL(free_pages_exact);
2441
2442 static unsigned int nr_free_zone_pages(int offset)
2443 {
2444 struct zoneref *z;
2445 struct zone *zone;
2446
2447 /* Just pick one node, since fallback list is circular */
2448 unsigned int sum = 0;
2449
2450 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2451
2452 for_each_zone_zonelist(zone, z, zonelist, offset) {
2453 unsigned long size = zone->present_pages;
2454 unsigned long high = high_wmark_pages(zone);
2455 if (size > high)
2456 sum += size - high;
2457 }
2458
2459 return sum;
2460 }
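/*
 * Illustrative arithmetic (numbers made up): a zone with
 * present_pages == 262144 and a high watermark of 4096 pages contributes
 * 262144 - 4096 = 258048 pages to the sum; a zone at or below its high
 * watermark contributes nothing.
 */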
2461
2462 /*
2463 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2464 */
2465 unsigned int nr_free_buffer_pages(void)
2466 {
2467 return nr_free_zone_pages(gfp_zone(GFP_USER));
2468 }
2469 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2470
2471 /*
2472 * Amount of free RAM allocatable within all zones
2473 */
2474 unsigned int nr_free_pagecache_pages(void)
2475 {
2476 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2477 }
2478
2479 static inline void show_node(struct zone *zone)
2480 {
2481 if (NUMA_BUILD)
2482 printk("Node %d ", zone_to_nid(zone));
2483 }
2484
2485 void si_meminfo(struct sysinfo *val)
2486 {
2487 val->totalram = totalram_pages;
2488 val->sharedram = 0;
2489 val->freeram = global_page_state(NR_FREE_PAGES);
2490 val->bufferram = nr_blockdev_pages();
2491 val->totalhigh = totalhigh_pages;
2492 val->freehigh = nr_free_highpages();
2493 val->mem_unit = PAGE_SIZE;
2494 }
2495
2496 EXPORT_SYMBOL(si_meminfo);
2497
2498 #ifdef CONFIG_NUMA
2499 void si_meminfo_node(struct sysinfo *val, int nid)
2500 {
2501 pg_data_t *pgdat = NODE_DATA(nid);
2502
2503 val->totalram = pgdat->node_present_pages;
2504 val->freeram = node_page_state(nid, NR_FREE_PAGES);
2505 #ifdef CONFIG_HIGHMEM
2506 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2507 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2508 NR_FREE_PAGES);
2509 #else
2510 val->totalhigh = 0;
2511 val->freehigh = 0;
2512 #endif
2513 val->mem_unit = PAGE_SIZE;
2514 }
2515 #endif
2516
2517 /*
2518 * Determine whether the node should be displayed or not, depending on whether
2519 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2520 */
2521 bool skip_free_areas_node(unsigned int flags, int nid)
2522 {
2523 bool ret = false;
2524
2525 if (!(flags & SHOW_MEM_FILTER_NODES))
2526 goto out;
2527
2528 get_mems_allowed();
2529 ret = !node_isset(nid, cpuset_current_mems_allowed);
2530 put_mems_allowed();
2531 out:
2532 return ret;
2533 }
2534
2535 #define K(x) ((x) << (PAGE_SHIFT-10))
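/*
 * Example (assuming 4K pages, i.e. PAGE_SHIFT == 12): K(x) shifts a page
 * count left by 2, multiplying by 4, so K(256) == 1024kB.
 */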
2536
2537 /*
2538 * Show free area list (used inside shift_scroll-lock stuff)
2539 * We also calculate the percentage fragmentation. We do this by counting the
2540 * memory on each free list with the exception of the first item on the list.
2541 * Suppresses nodes that are not allowed by current's cpuset if
2542 * SHOW_MEM_FILTER_NODES is passed.
2543 */
2544 void show_free_areas(unsigned int filter)
2545 {
2546 int cpu;
2547 struct zone *zone;
2548
2549 for_each_populated_zone(zone) {
2550 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2551 continue;
2552 show_node(zone);
2553 printk("%s per-cpu:\n", zone->name);
2554
2555 for_each_online_cpu(cpu) {
2556 struct per_cpu_pageset *pageset;
2557
2558 pageset = per_cpu_ptr(zone->pageset, cpu);
2559
2560 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2561 cpu, pageset->pcp.high,
2562 pageset->pcp.batch, pageset->pcp.count);
2563 }
2564 }
2565
2566 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2567 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2568 " unevictable:%lu"
2569 " dirty:%lu writeback:%lu unstable:%lu\n"
2570 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2571 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2572 global_page_state(NR_ACTIVE_ANON),
2573 global_page_state(NR_INACTIVE_ANON),
2574 global_page_state(NR_ISOLATED_ANON),
2575 global_page_state(NR_ACTIVE_FILE),
2576 global_page_state(NR_INACTIVE_FILE),
2577 global_page_state(NR_ISOLATED_FILE),
2578 global_page_state(NR_UNEVICTABLE),
2579 global_page_state(NR_FILE_DIRTY),
2580 global_page_state(NR_WRITEBACK),
2581 global_page_state(NR_UNSTABLE_NFS),
2582 global_page_state(NR_FREE_PAGES),
2583 global_page_state(NR_SLAB_RECLAIMABLE),
2584 global_page_state(NR_SLAB_UNRECLAIMABLE),
2585 global_page_state(NR_FILE_MAPPED),
2586 global_page_state(NR_SHMEM),
2587 global_page_state(NR_PAGETABLE),
2588 global_page_state(NR_BOUNCE));
2589
2590 for_each_populated_zone(zone) {
2591 int i;
2592
2593 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2594 continue;
2595 show_node(zone);
2596 printk("%s"
2597 " free:%lukB"
2598 " min:%lukB"
2599 " low:%lukB"
2600 " high:%lukB"
2601 " active_anon:%lukB"
2602 " inactive_anon:%lukB"
2603 " active_file:%lukB"
2604 " inactive_file:%lukB"
2605 " unevictable:%lukB"
2606 " isolated(anon):%lukB"
2607 " isolated(file):%lukB"
2608 " present:%lukB"
2609 " mlocked:%lukB"
2610 " dirty:%lukB"
2611 " writeback:%lukB"
2612 " mapped:%lukB"
2613 " shmem:%lukB"
2614 " slab_reclaimable:%lukB"
2615 " slab_unreclaimable:%lukB"
2616 " kernel_stack:%lukB"
2617 " pagetables:%lukB"
2618 " unstable:%lukB"
2619 " bounce:%lukB"
2620 " writeback_tmp:%lukB"
2621 " pages_scanned:%lu"
2622 " all_unreclaimable? %s"
2623 "\n",
2624 zone->name,
2625 K(zone_page_state(zone, NR_FREE_PAGES)),
2626 K(min_wmark_pages(zone)),
2627 K(low_wmark_pages(zone)),
2628 K(high_wmark_pages(zone)),
2629 K(zone_page_state(zone, NR_ACTIVE_ANON)),
2630 K(zone_page_state(zone, NR_INACTIVE_ANON)),
2631 K(zone_page_state(zone, NR_ACTIVE_FILE)),
2632 K(zone_page_state(zone, NR_INACTIVE_FILE)),
2633 K(zone_page_state(zone, NR_UNEVICTABLE)),
2634 K(zone_page_state(zone, NR_ISOLATED_ANON)),
2635 K(zone_page_state(zone, NR_ISOLATED_FILE)),
2636 K(zone->present_pages),
2637 K(zone_page_state(zone, NR_MLOCK)),
2638 K(zone_page_state(zone, NR_FILE_DIRTY)),
2639 K(zone_page_state(zone, NR_WRITEBACK)),
2640 K(zone_page_state(zone, NR_FILE_MAPPED)),
2641 K(zone_page_state(zone, NR_SHMEM)),
2642 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2643 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2644 zone_page_state(zone, NR_KERNEL_STACK) *
2645 THREAD_SIZE / 1024,
2646 K(zone_page_state(zone, NR_PAGETABLE)),
2647 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2648 K(zone_page_state(zone, NR_BOUNCE)),
2649 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2650 zone->pages_scanned,
2651 (zone->all_unreclaimable ? "yes" : "no")
2652 );
2653 printk("lowmem_reserve[]:");
2654 for (i = 0; i < MAX_NR_ZONES; i++)
2655 printk(" %lu", zone->lowmem_reserve[i]);
2656 printk("\n");
2657 }
2658
2659 for_each_populated_zone(zone) {
2660 unsigned long nr[MAX_ORDER], flags, order, total = 0;
2661
2662 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2663 continue;
2664 show_node(zone);
2665 printk("%s: ", zone->name);
2666
2667 spin_lock_irqsave(&zone->lock, flags);
2668 for (order = 0; order < MAX_ORDER; order++) {
2669 nr[order] = zone->free_area[order].nr_free;
2670 total += nr[order] << order;
2671 }
2672 spin_unlock_irqrestore(&zone->lock, flags);
2673 for (order = 0; order < MAX_ORDER; order++)
2674 printk("%lu*%lukB ", nr[order], K(1UL) << order);
2675 printk("= %lukB\n", K(total));
2676 }
2677
2678 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2679
2680 show_swap_cache_info();
2681 }
2682
2683 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2684 {
2685 zoneref->zone = zone;
2686 zoneref->zone_idx = zone_idx(zone);
2687 }
2688
2689 /*
2690 * Builds allocation fallback zone lists.
2691 *
2692 * Add all populated zones of a node to the zonelist.
2693 */
2694 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2695 int nr_zones, enum zone_type zone_type)
2696 {
2697 struct zone *zone;
2698
2699 BUG_ON(zone_type >= MAX_NR_ZONES);
2700 zone_type++;
2701
2702 do {
2703 zone_type--;
2704 zone = pgdat->node_zones + zone_type;
2705 if (populated_zone(zone)) {
2706 zoneref_set_zone(zone,
2707 &zonelist->_zonerefs[nr_zones++]);
2708 check_highest_zone(zone_type);
2709 }
2710
2711 } while (zone_type);
2712 return nr_zones;
2713 }
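/*
 * Illustrative result (a hypothetical node with DMA, Normal and HighMem
 * all populated): since zone_type counts down from MAX_NR_ZONES - 1, the
 * zonerefs are appended highest zone first:
 *
 *	_zonerefs[0] = HighMem, _zonerefs[1] = Normal, _zonerefs[2] = DMA
 *
 * so allocations fall back from the most easily spared zone toward the
 * most precious one.
 */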
2714
2715
2716 /*
2717 * zonelist_order:
2718 * 0 = automatic detection of better ordering.
2719 * 1 = order by ([node] distance, -zonetype)
2720 * 2 = order by (-zonetype, [node] distance)
2721 *
2722 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2723 * the same zonelist. So only NUMA can configure this param.
2724 */
2725 #define ZONELIST_ORDER_DEFAULT 0
2726 #define ZONELIST_ORDER_NODE 1
2727 #define ZONELIST_ORDER_ZONE 2
2728
2729 /* zonelist order in the kernel.
2730 * set_zonelist_order() will set this to NODE or ZONE.
2731 */
2732 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2733 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2734
2735
2736 #ifdef CONFIG_NUMA
2737 /* The value the user specified; may be changed by config */
2738 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2739 /* string for sysctl */
2740 #define NUMA_ZONELIST_ORDER_LEN 16
2741 char numa_zonelist_order[16] = "default";
2742
2743 /*
2744 * interface to configure zonelist ordering.
2745 * command line option "numa_zonelist_order"
2746 * = "[dD]efault" - default, automatic configuration.
2747 * = "[nN]ode"    - order by node locality, then by zone within node
2748 * = "[zZ]one"    - order by zone, then by locality within zone
2749 */
2750
2751 static int __parse_numa_zonelist_order(char *s)
2752 {
2753 if (*s == 'd' || *s == 'D') {
2754 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2755 } else if (*s == 'n' || *s == 'N') {
2756 user_zonelist_order = ZONELIST_ORDER_NODE;
2757 } else if (*s == 'z' || *s == 'Z') {
2758 user_zonelist_order = ZONELIST_ORDER_ZONE;
2759 } else {
2760 printk(KERN_WARNING
2761 "Ignoring invalid numa_zonelist_order value: "
2762 "%s\n", s);
2763 return -EINVAL;
2764 }
2765 return 0;
2766 }
2767
2768 static __init int setup_numa_zonelist_order(char *s)
2769 {
2770 int ret;
2771
2772 if (!s)
2773 return 0;
2774
2775 ret = __parse_numa_zonelist_order(s);
2776 if (ret == 0)
2777 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2778
2779 return ret;
2780 }
2781 early_param("numa_zonelist_order", setup_numa_zonelist_order);
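/*
 * Usage sketch (administrator-facing; the sysctl path is as conventionally
 * exposed on NUMA kernels): the ordering can be selected at boot with the
 * command-line option
 *
 *	numa_zonelist_order=zone
 *
 * or at runtime, where the sysctl is available, with
 *
 *	echo node > /proc/sys/vm/numa_zonelist_order
 */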
2782
2783 /*
2784 * sysctl handler for numa_zonelist_order
2785 */
2786 int numa_zonelist_order_handler(ctl_table *table, int write,
2787 void __user *buffer, size_t *length,
2788 loff_t *ppos)
2789 {
2790 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2791 int ret;
2792 static DEFINE_MUTEX(zl_order_mutex);
2793
2794 mutex_lock(&zl_order_mutex);
2795 if (write)
2796 strcpy(saved_string, (char*)table->data);
2797 ret = proc_dostring(table, write, buffer, length, ppos);
2798 if (ret)
2799 goto out;
2800 if (write) {
2801 int oldval = user_zonelist_order;
2802 if (__parse_numa_zonelist_order((char*)table->data)) {
2803 /*
2804 * bogus value. restore saved string
2805 */
2806 strncpy((char*)table->data, saved_string,
2807 NUMA_ZONELIST_ORDER_LEN);
2808 user_zonelist_order = oldval;
2809 } else if (oldval != user_zonelist_order) {
2810 mutex_lock(&zonelists_mutex);
2811 build_all_zonelists(NULL);
2812 mutex_unlock(&zonelists_mutex);
2813 }
2814 }
2815 out:
2816 mutex_unlock(&zl_order_mutex);
2817 return ret;
2818 }
2819
2820
2821 #define MAX_NODE_LOAD (nr_online_nodes)
2822 static int node_load[MAX_NUMNODES];
2823
2824 /**
2825 * find_next_best_node - find the next node that should appear in a given node's fallback list
2826 * @node: node whose fallback list we're appending
2827 * @used_node_mask: nodemask_t of already used nodes
2828 *
2829 * We use a number of factors to determine which is the next node that should
2830 * appear on a given node's fallback list. The node should not have appeared
2831 * already in @node's fallback list, and it should be the next closest node
2832 * according to the distance array (which contains arbitrary distance values
2833 * from each node to each node in the system), and should also prefer nodes
2834 * with no CPUs, since presumably they'll have very little allocation pressure
2835 * on them otherwise.
2836 * It returns -1 if no node is found.
2837 */
2838 static int find_next_best_node(int node, nodemask_t *used_node_mask)
2839 {
2840 int n, val;
2841 int min_val = INT_MAX;
2842 int best_node = -1;
2843 const struct cpumask *tmp = cpumask_of_node(0);
2844
2845 /* Use the local node if we haven't already */
2846 if (!node_isset(node, *used_node_mask)) {
2847 node_set(node, *used_node_mask);
2848 return node;
2849 }
2850
2851 for_each_node_state(n, N_HIGH_MEMORY) {
2852
2853 /* Don't want a node to appear more than once */
2854 if (node_isset(n, *used_node_mask))
2855 continue;
2856
2857 /* Use the distance array to find the distance */
2858 val = node_distance(node, n);
2859
2860 /* Penalize nodes under us ("prefer the next node") */
2861 val += (n < node);
2862
2863 /* Give preference to headless and unused nodes */
2864 tmp = cpumask_of_node(n);
2865 if (!cpumask_empty(tmp))
2866 val += PENALTY_FOR_NODE_WITH_CPUS;
2867
2868 /* Slight preference for less loaded node */
2869 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2870 val += node_load[n];
2871
2872 if (val < min_val) {
2873 min_val = val;
2874 best_node = n;
2875 }
2876 }
2877
2878 if (best_node >= 0)
2879 node_set(best_node, *used_node_mask);
2880
2881 return best_node;
2882 }
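/*
 * Scoring sketch (distances and penalties hypothetical): for two candidate
 * nodes at distances 20 and 40 from @node, the distance term dominates
 * because val is scaled by MAX_NODE_LOAD * MAX_NUMNODES before node_load[]
 * is added, so the distance-20 node wins. Among equidistant candidates, a
 * node with CPUs is penalized by PENALTY_FOR_NODE_WITH_CPUS, and a more
 * loaded node loses the final tie-break.
 */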
2883
2884
2885 /*
2886 * Build zonelists ordered by node and zones within node.
2887 * This results in maximum locality--normal zone overflows into local
2888 * DMA zone, if any--but risks exhausting DMA zone.
2889 */
2890 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2891 {
2892 int j;
2893 struct zonelist *zonelist;
2894
2895 zonelist = &pgdat->node_zonelists[0];
2896 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2897 ;
2898 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2899 MAX_NR_ZONES - 1);
2900 zonelist->_zonerefs[j].zone = NULL;
2901 zonelist->_zonerefs[j].zone_idx = 0;
2902 }
2903
2904 /*
2905 * Build gfp_thisnode zonelists
2906 */
2907 static void build_thisnode_zonelists(pg_data_t *pgdat)
2908 {
2909 int j;
2910 struct zonelist *zonelist;
2911
2912 zonelist = &pgdat->node_zonelists[1];
2913 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2914 zonelist->_zonerefs[j].zone = NULL;
2915 zonelist->_zonerefs[j].zone_idx = 0;
2916 }
2917
2918 /*
2919 * Build zonelists ordered by zone and nodes within zones.
2920 * This results in conserving DMA zone[s] until all Normal memory is
2921 * exhausted, but results in overflowing to remote node while memory
2922 * may still exist in local DMA zone.
2923 */
2924 static int node_order[MAX_NUMNODES];
2925
2926 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2927 {
2928 int pos, j, node;
2929 int zone_type; /* needs to be signed */
2930 struct zone *z;
2931 struct zonelist *zonelist;
2932
2933 zonelist = &pgdat->node_zonelists[0];
2934 pos = 0;
2935 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2936 for (j = 0; j < nr_nodes; j++) {
2937 node = node_order[j];
2938 z = &NODE_DATA(node)->node_zones[zone_type];
2939 if (populated_zone(z)) {
2940 zoneref_set_zone(z,
2941 &zonelist->_zonerefs[pos++]);
2942 check_highest_zone(zone_type);
2943 }
2944 }
2945 }
2946 zonelist->_zonerefs[pos].zone = NULL;
2947 zonelist->_zonerefs[pos].zone_idx = 0;
2948 }
2949
2950 static int default_zonelist_order(void)
2951 {
2952 int nid, zone_type;
2953 unsigned long low_kmem_size, total_size;
2954 struct zone *z;
2955 int average_size;
2956 /*
2957 * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
2958 * If they are really small and used heavily, the system can fall
2959 * into OOM very easily.
2960 * This function detects ZONE_DMA/DMA32 size and configures the zone order.
2961 */
2962 /* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
2963 low_kmem_size = 0;
2964 total_size = 0;
2965 for_each_online_node(nid) {
2966 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2967 z = &NODE_DATA(nid)->node_zones[zone_type];
2968 if (populated_zone(z)) {
2969 if (zone_type < ZONE_NORMAL)
2970 low_kmem_size += z->present_pages;
2971 total_size += z->present_pages;
2972 } else if (zone_type == ZONE_NORMAL) {
2973 /*
2974 * If any node has only lowmem, then node order
2975 * is preferred to allow kernel allocations
2976 * locally; otherwise, they can easily infringe
2977 * on other nodes when there is an abundance of
2978 * lowmem available to allocate from.
2979 */
2980 return ZONELIST_ORDER_NODE;
2981 }
2982 }
2983 }
2984 if (!low_kmem_size || /* there is no DMA area. */
2985 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2986 return ZONELIST_ORDER_NODE;
2987 /*
2988 * Look into each node's config.
2989 * If there is a node whose DMA/DMA32 memory covers a very large share
2990 * of its local memory, NODE_ORDER may be suitable.
2991 */
2992 average_size = total_size /
2993 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2994 for_each_online_node(nid) {
2995 low_kmem_size = 0;
2996 total_size = 0;
2997 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2998 z = &NODE_DATA(nid)->node_zones[zone_type];
2999 if (populated_zone(z)) {
3000 if (zone_type < ZONE_NORMAL)
3001 low_kmem_size += z->present_pages;
3002 total_size += z->present_pages;
3003 }
3004 }
3005 if (low_kmem_size &&
3006 total_size > average_size && /* ignore small node */
3007 low_kmem_size > total_size * 70/100)
3008 return ZONELIST_ORDER_NODE;
3009 }
3010 return ZONELIST_ORDER_ZONE;
3011 }
3012
3013 static void set_zonelist_order(void)
3014 {
3015 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3016 current_zonelist_order = default_zonelist_order();
3017 else
3018 current_zonelist_order = user_zonelist_order;
3019 }
3020
3021 static void build_zonelists(pg_data_t *pgdat)
3022 {
3023 int j, node, load;
3024 enum zone_type i;
3025 nodemask_t used_mask;
3026 int local_node, prev_node;
3027 struct zonelist *zonelist;
3028 int order = current_zonelist_order;
3029
3030 /* initialize zonelists */
3031 for (i = 0; i < MAX_ZONELISTS; i++) {
3032 zonelist = pgdat->node_zonelists + i;
3033 zonelist->_zonerefs[0].zone = NULL;
3034 zonelist->_zonerefs[0].zone_idx = 0;
3035 }
3036
3037 /* NUMA-aware ordering of nodes */
3038 local_node = pgdat->node_id;
3039 load = nr_online_nodes;
3040 prev_node = local_node;
3041 nodes_clear(used_mask);
3042
3043 memset(node_order, 0, sizeof(node_order));
3044 j = 0;
3045
3046 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3047 int distance = node_distance(local_node, node);
3048
3049 /*
3050 * If another node is sufficiently far away then it is better
3051 * to reclaim pages in a zone before going off node.
3052 */
3053 if (distance > RECLAIM_DISTANCE)
3054 zone_reclaim_mode = 1;
3055
3056 /*
3057 * We don't want to pressure a particular node, so add a penalty
3058 * to the first node in the same distance group to make the
3059 * selection round-robin.
3060 */
3061 if (distance != node_distance(local_node, prev_node))
3062 node_load[node] = load;
3063
3064 prev_node = node;
3065 load--;
3066 if (order == ZONELIST_ORDER_NODE)
3067 build_zonelists_in_node_order(pgdat, node);
3068 else
3069 node_order[j++] = node; /* remember order */
3070 }
3071
3072 if (order == ZONELIST_ORDER_ZONE) {
3073 /* calculate node order -- i.e., DMA last! */
3074 build_zonelists_in_zone_order(pgdat, j);
3075 }
3076
3077 build_thisnode_zonelists(pgdat);
3078 }
3079
3080 /* Construct the zonelist performance cache - see mmzone.h for details */
3081 static void build_zonelist_cache(pg_data_t *pgdat)
3082 {
3083 struct zonelist *zonelist;
3084 struct zonelist_cache *zlc;
3085 struct zoneref *z;
3086
3087 zonelist = &pgdat->node_zonelists[0];
3088 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3089 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3090 for (z = zonelist->_zonerefs; z->zone; z++)
3091 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3092 }
3093
3094 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3095 /*
3096 * Return node id of node used for "local" allocations.
3097 * I.e., first node id of first zone in arg node's generic zonelist.
3098 * Used for initializing percpu 'numa_mem', which is used primarily
3099 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3100 */
3101 int local_memory_node(int node)
3102 {
3103 struct zone *zone;
3104
3105 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3106 gfp_zone(GFP_KERNEL),
3107 NULL,
3108 &zone);
3109 return zone->node;
3110 }
3111 #endif
3112
3113 #else /* CONFIG_NUMA */
3114
3115 static void set_zonelist_order(void)
3116 {
3117 current_zonelist_order = ZONELIST_ORDER_ZONE;
3118 }
3119
3120 static void build_zonelists(pg_data_t *pgdat)
3121 {
3122 int node, local_node;
3123 enum zone_type j;
3124 struct zonelist *zonelist;
3125
3126 local_node = pgdat->node_id;
3127
3128 zonelist = &pgdat->node_zonelists[0];
3129 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3130
3131 /*
3132 * Now we build the zonelist so that it contains the zones
3133 * of all the other nodes.
3134 * We don't want to pressure a particular node, so when
3135 * building the zones for node N, we make sure that the
3136 * zones coming right after the local ones are those from
3137 * node N+1 (modulo N)
3138 */
3139 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3140 if (!node_online(node))
3141 continue;
3142 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3143 MAX_NR_ZONES - 1);
3144 }
3145 for (node = 0; node < local_node; node++) {
3146 if (!node_online(node))
3147 continue;
3148 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3149 MAX_NR_ZONES - 1);
3150 }
3151
3152 zonelist->_zonerefs[j].zone = NULL;
3153 zonelist->_zonerefs[j].zone_idx = 0;
3154 }
3155
3156 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3157 static void build_zonelist_cache(pg_data_t *pgdat)
3158 {
3159 pgdat->node_zonelists[0].zlcache_ptr = NULL;
3160 }
3161
3162 #endif /* CONFIG_NUMA */
3163
3164 /*
3165 * Boot pageset table. One per cpu which is going to be used for all
3166 * zones and all nodes. The parameters will be set in such a way
3167 * that an item put on a list will immediately be handed over to
3168 * the buddy list. This is safe since pageset manipulation is done
3169 * with interrupts disabled.
3170 *
3171 * The boot_pagesets must be kept even after bootup is complete for
3172 * unused processors and/or zones. They do play a role for bootstrapping
3173 * hotplugged processors.
3174 *
3175 * zoneinfo_show() and maybe other functions do
3176 * not check if the processor is online before following the pageset pointer.
3177 * Other parts of the kernel may not check if the zone is available.
3178 */
3179 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3180 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3181 static void setup_zone_pageset(struct zone *zone);
3182
3183 /*
3184 * Global mutex to protect against size modification of zonelists
3185 * as well as to serialize pageset setup for the new populated zone.
3186 */
3187 DEFINE_MUTEX(zonelists_mutex);
3188
3189 /* The return type is int just for the benefit of stop_machine() */
3190 static __init_refok int __build_all_zonelists(void *data)
3191 {
3192 int nid;
3193 int cpu;
3194
3195 #ifdef CONFIG_NUMA
3196 memset(node_load, 0, sizeof(node_load));
3197 #endif
3198 for_each_online_node(nid) {
3199 pg_data_t *pgdat = NODE_DATA(nid);
3200
3201 build_zonelists(pgdat);
3202 build_zonelist_cache(pgdat);
3203 }
3204
3205 /*
3206 * Initialize the boot_pagesets that are going to be used
3207 * for bootstrapping processors. The real pagesets for
3208 * each zone will be allocated later when the per cpu
3209 * allocator is available.
3210 *
3211 * boot_pagesets are used also for bootstrapping offline
3212 * cpus if the system is already booted because the pagesets
3213 * are needed to initialize allocators on a specific cpu too.
3214 * F.e. the percpu allocator needs the page allocator which
3215 * needs the percpu allocator in order to allocate its pagesets
3216 * (a chicken-egg dilemma).
3217 */
3218 for_each_possible_cpu(cpu) {
3219 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3220
3221 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3222 /*
3223 * We now know the "local memory node" for each node--
3224 * i.e., the node of the first zone in the generic zonelist.
3225 * Set up numa_mem percpu variable for on-line cpus. During
3226 * boot, only the boot cpu should be on-line; we'll init the
3227 * secondary cpus' numa_mem as they come on-line. During
3228 * node/memory hotplug, we'll fixup all on-line cpus.
3229 */
3230 if (cpu_online(cpu))
3231 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3232 #endif
3233 }
3234
3235 return 0;
3236 }
3237
3238 /*
3239 * Called with zonelists_mutex held always
3240 * unless system_state == SYSTEM_BOOTING.
3241 */
3242 void __ref build_all_zonelists(void *data)
3243 {
3244 set_zonelist_order();
3245
3246 if (system_state == SYSTEM_BOOTING) {
3247 __build_all_zonelists(NULL);
3248 mminit_verify_zonelist();
3249 cpuset_init_current_mems_allowed();
3250 } else {
3251 /* we have to stop all CPUs to guarantee there is no user
3252 of the zonelist */
3253 #ifdef CONFIG_MEMORY_HOTPLUG
3254 if (data)
3255 setup_zone_pageset((struct zone *)data);
3256 #endif
3257 stop_machine(__build_all_zonelists, NULL, NULL);
3258 /* cpuset refresh routine should be here */
3259 }
3260 vm_total_pages = nr_free_pagecache_pages();
3261 /*
3262 * Disable grouping by mobility if the number of pages in the
3263 * system is too low to allow the mechanism to work. It would be
3264 * more accurate, but expensive to check per-zone. This check is
3265 * made on memory-hotadd so a system can start with mobility
3266 * disabled and enable it later
3267 */
3268 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3269 page_group_by_mobility_disabled = 1;
3270 else
3271 page_group_by_mobility_disabled = 0;
3272
3273 printk("Built %i zonelists in %s order, mobility grouping %s. "
3274 "Total pages: %ld\n",
3275 nr_online_nodes,
3276 zonelist_order_name[current_zonelist_order],
3277 page_group_by_mobility_disabled ? "off" : "on",
3278 vm_total_pages);
3279 #ifdef CONFIG_NUMA
3280 printk("Policy zone: %s\n", zone_names[policy_zone]);
3281 #endif
3282 }
3283
3284 /*
3285 * Helper functions to size the waitqueue hash table.
3286 * Essentially these want to choose hash table sizes sufficiently
3287 * large so that collisions trying to wait on pages are rare.
3288 * But in fact, the number of active page waitqueues on typical
3289 * systems is ridiculously low, less than 200. So this is even
3290 * conservative, even though it seems large.
3291 *
3292 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3293 * waitqueues, i.e. the size of the waitq table given the number of pages.
3294 */
3295 #define PAGES_PER_WAITQUEUE 256
3296
3297 #ifndef CONFIG_MEMORY_HOTPLUG
3298 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3299 {
3300 unsigned long size = 1;
3301
3302 pages /= PAGES_PER_WAITQUEUE;
3303
3304 while (size < pages)
3305 size <<= 1;
3306
3307 /*
3308 * Once we have dozens or even hundreds of threads sleeping
3309 * on IO we've got bigger problems than wait queue collision.
3310 * Limit the size of the wait table to a reasonable size.
3311 */
3312 size = min(size, 4096UL);
3313
3314 return max(size, 4UL);
3315 }
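/*
 * Worked example (zone size hypothetical, 4K pages assumed): a 1GiB zone
 * spans 262144 pages; 262144 / PAGES_PER_WAITQUEUE == 1024, and the
 * power-of-two round-up leaves size == 1024 entries, inside the
 * [4, 4096] clamp.
 */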
3316 #else
3317 /*
3318 * A zone's size might be changed by hot-add, so it is not possible to determine
3319 * a suitable size for its wait_table. So we use the maximum size now.
3320 *
3321 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
3322 *
3323 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3324 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3325 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3326 *
3327 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3328 * or more, computed the traditional way (see above). It equals:
3329 *
3330 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3331 * ia64(16K page size) : = ( 8G + 4M)byte.
3332 * powerpc (64K page size) : = (32G +16M)byte.
3333 */
3334 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3335 {
3336 return 4096UL;
3337 }
3338 #endif
3339
3340 /*
3341 * This is an integer logarithm so that shifts can be used later
3342 * to extract the more random high bits from the multiplicative
3343 * hash function before the remainder is taken.
3344 */
3345 static inline unsigned long wait_table_bits(unsigned long size)
3346 {
3347 return ffz(~size);
3348 }
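/*
 * Example: for a table of size 4096 == 1 << 12, ~size has its only zero
 * bit at position 12, so ffz(~size) returns 12, the shift width used by
 * the hashed waitqueue lookup.
 */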
3349
3350 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3351
3352 /*
3353 * Check if a pageblock contains reserved pages
3354 */
3355 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3356 {
3357 unsigned long pfn;
3358
3359 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3360 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3361 return 1;
3362 }
3363 return 0;
3364 }
3365
3366 /*
3367 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3368 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3369 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3370 * higher will lead to a bigger reserve which will get freed as contiguous
3371 * blocks as reclaim kicks in
3372 */
3373 static void setup_zone_migrate_reserve(struct zone *zone)
3374 {
3375 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3376 struct page *page;
3377 unsigned long block_migratetype;
3378 int reserve;
3379
3380 /* Get the start pfn, end pfn and the number of blocks to reserve */
3381 start_pfn = zone->zone_start_pfn;
3382 end_pfn = start_pfn + zone->spanned_pages;
3383 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3384 pageblock_order;
3385
3386 /*
3387 * Reserve blocks are generally in place to help high-order atomic
3388 * allocations that are short-lived. A min_free_kbytes value that
3389 * would result in more than 2 reserve blocks for atomic allocations
3390 * is assumed to be in place to help anti-fragmentation for the
3391 * future allocation of hugepages at runtime.
3392 */
3393 reserve = min(2, reserve);
3394
3395 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3396 if (!pfn_valid(pfn))
3397 continue;
3398 page = pfn_to_page(pfn);
3399
3400 /* Watch out for overlapping nodes */
3401 if (page_to_nid(page) != zone_to_nid(zone))
3402 continue;
3403
3404 /* Blocks with reserved pages will never be freed; skip them. */
3405 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3406 if (pageblock_is_reserved(pfn, block_end_pfn))
3407 continue;
3408
3409 block_migratetype = get_pageblock_migratetype(page);
3410
3411 /* If this block is reserved, account for it */
3412 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3413 reserve--;
3414 continue;
3415 }
3416
3417 /* Suitable for reserving if this block is movable */
3418 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3419 set_pageblock_migratetype(page, MIGRATE_RESERVE);
3420 move_freepages_block(zone, page, MIGRATE_RESERVE);
3421 reserve--;
3422 continue;
3423 }
3424
3425 /*
3426 * If the reserve is met and this is a previous reserved block,
3427 * take it back
3428 */
3429 if (block_migratetype == MIGRATE_RESERVE) {
3430 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3431 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3432 }
3433 }
3434 }
3435
3436 /*
3437 * Initially all pages are reserved - free ones are freed
3438 * up by free_all_bootmem() once the early boot process is
3439 * done. Non-atomic initialization, single-pass.
3440 */
3441 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3442 unsigned long start_pfn, enum memmap_context context)
3443 {
3444 struct page *page;
3445 unsigned long end_pfn = start_pfn + size;
3446 unsigned long pfn;
3447 struct zone *z;
3448
3449 if (highest_memmap_pfn < end_pfn - 1)
3450 highest_memmap_pfn = end_pfn - 1;
3451
3452 z = &NODE_DATA(nid)->node_zones[zone];
3453 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3454 /*
3455 * There can be holes in boot-time mem_map[]s
3456 * handed to this function. They do not
3457 * exist on hotplugged memory.
3458 */
3459 if (context == MEMMAP_EARLY) {
3460 if (!early_pfn_valid(pfn))
3461 continue;
3462 if (!early_pfn_in_nid(pfn, nid))
3463 continue;
3464 }
3465 page = pfn_to_page(pfn);
3466 set_page_links(page, zone, nid, pfn);
3467 mminit_verify_page_links(page, zone, nid, pfn);
3468 init_page_count(page);
3469 reset_page_mapcount(page);
3470 SetPageReserved(page);
3471 /*
3472 * Mark the block movable so that blocks are reserved for
3473 * movable at startup. This will force kernel allocations
3474 * to reserve their blocks rather than leaking throughout
3475 * the address space during boot when many long-lived
3476 * kernel allocations are made. Later some blocks near
3477 * the start are marked MIGRATE_RESERVE by
3478 * setup_zone_migrate_reserve()
3479 *
3480 * bitmap is created for zone's valid pfn range. but memmap
3481 * can be created for invalid pages (for alignment)
3482 * check here not to call set_pageblock_migratetype() against
3483 * pfn out of zone.
3484 */
3485 if ((z->zone_start_pfn <= pfn)
3486 && (pfn < z->zone_start_pfn + z->spanned_pages)
3487 && !(pfn & (pageblock_nr_pages - 1)))
3488 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3489
3490 INIT_LIST_HEAD(&page->lru);
3491 #ifdef WANT_PAGE_VIRTUAL
3492 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3493 if (!is_highmem_idx(zone))
3494 set_page_address(page, __va(pfn << PAGE_SHIFT));
3495 #endif
3496 }
3497 }
3498
3499 static void __meminit zone_init_free_lists(struct zone *zone)
3500 {
3501 int order, t;
3502 for_each_migratetype_order(order, t) {
3503 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3504 zone->free_area[order].nr_free = 0;
3505 }
3506 }
3507
3508 #ifndef __HAVE_ARCH_MEMMAP_INIT
3509 #define memmap_init(size, nid, zone, start_pfn) \
3510 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3511 #endif
3512
3513 static int zone_batchsize(struct zone *zone)
3514 {
3515 #ifdef CONFIG_MMU
3516 int batch;
3517
3518 /*
3519 * The per-cpu-pages pools are set to around 1000th of the
3520 * size of the zone. But no more than 1/2 of a meg.
3521 *
3522 * OK, so we don't know how big the cache is. So guess.
3523 */
3524 batch = zone->present_pages / 1024;
3525 if (batch * PAGE_SIZE > 512 * 1024)
3526 batch = (512 * 1024) / PAGE_SIZE;
3527 batch /= 4; /* We effectively *= 4 below */
3528 if (batch < 1)
3529 batch = 1;
3530
3531 /*
3532 * Clamp the batch to a 2^n - 1 value. Having a power
3533 * of 2 value was found to be more likely to have
3534 * suboptimal cache aliasing properties in some cases.
3535 *
3536 * For example if 2 tasks are alternately allocating
3537 * batches of pages, one task can end up with a lot
3538 * of pages of one half of the possible page colors
3539 * and the other with pages of the other colors.
3540 */
3541 batch = rounddown_pow_of_two(batch + batch/2) - 1;
3542
3543 return batch;
3544
3545 #else
3546 /* The deferral and batching of frees should be suppressed under NOMMU
3547 * conditions.
3548 *
3549 * The problem is that NOMMU needs to be able to allocate large chunks
3550 * of contiguous memory as there's no hardware page translation to
3551 * assemble apparent contiguous memory from discontiguous pages.
3552 *
3553 * Queueing large contiguous runs of pages for batching, however,
3554 * causes the pages to actually be freed in smaller chunks. As there
3555 * can be a significant delay between the individual batches being
3556 * recycled, this leads to the once large chunks of space being
3557 * fragmented and becoming unavailable for high-order allocations.
3558 */
3559 return 0;
3560 #endif
3561 }
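/*
 * Worked example (zone size hypothetical, 4K pages assumed): for
 * present_pages == 262144, batch starts at 262144 / 1024 == 256 pages;
 * 256 pages exceed half a megabyte, so batch is capped at
 * 512 * 1024 / PAGE_SIZE == 128, quartered to 32, and
 * rounddown_pow_of_two(32 + 16) - 1 yields a final batch of 31.
 */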
3562
3563 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3564 {
3565 struct per_cpu_pages *pcp;
3566 int migratetype;
3567
3568 memset(p, 0, sizeof(*p));
3569
3570 pcp = &p->pcp;
3571 pcp->count = 0;
3572 pcp->high = 6 * batch;
3573 pcp->batch = max(1UL, 1 * batch);
3574 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3575 INIT_LIST_HEAD(&pcp->lists[migratetype]);
3576 }
3577
3578 /*
3579 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3580 * to the value high for the pageset p.
3581 */
3582
3583 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3584 unsigned long high)
3585 {
3586 struct per_cpu_pages *pcp;
3587
3588 pcp = &p->pcp;
3589 pcp->high = high;
3590 pcp->batch = max(1UL, high/4);
3591 if ((high/4) > (PAGE_SHIFT * 8))
3592 pcp->batch = PAGE_SHIFT * 8;
3593 }
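/*
 * Worked example (the high value is hypothetical, PAGE_SHIFT == 12
 * assumed): for high == 1000, batch becomes max(1, 250) == 250; since
 * 250 exceeds PAGE_SHIFT * 8 == 96, it is clamped to 96.
 */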
3594
3595 static void setup_zone_pageset(struct zone *zone)
3596 {
3597 int cpu;
3598
3599 zone->pageset = alloc_percpu(struct per_cpu_pageset);
3600
3601 for_each_possible_cpu(cpu) {
3602 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3603
3604 setup_pageset(pcp, zone_batchsize(zone));
3605
3606 if (percpu_pagelist_fraction)
3607 setup_pagelist_highmark(pcp,
3608 (zone->present_pages /
3609 percpu_pagelist_fraction));
3610 }
3611 }
3612
3613 /*
3614 * Allocate per cpu pagesets and initialize them.
3615 * Before this call only boot pagesets were available.
3616 */
3617 void __init setup_per_cpu_pageset(void)
3618 {
3619 struct zone *zone;
3620
3621 for_each_populated_zone(zone)
3622 setup_zone_pageset(zone);
3623 }
3624
3625 static noinline __init_refok
3626 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3627 {
3628 int i;
3629 struct pglist_data *pgdat = zone->zone_pgdat;
3630 size_t alloc_size;
3631
3632 /*
3633 * The per-page waitqueue mechanism uses hashed waitqueues
3634 * per zone.
3635 */
3636 zone->wait_table_hash_nr_entries =
3637 wait_table_hash_nr_entries(zone_size_pages);
3638 zone->wait_table_bits =
3639 wait_table_bits(zone->wait_table_hash_nr_entries);
3640 alloc_size = zone->wait_table_hash_nr_entries
3641 * sizeof(wait_queue_head_t);
3642
3643 if (!slab_is_available()) {
3644 zone->wait_table = (wait_queue_head_t *)
3645 alloc_bootmem_node_nopanic(pgdat, alloc_size);
3646 } else {
3647 /*
3648 * This case means that a zone whose size was 0 gets new memory
3649 * via memory hot-add.
3650 * But it may be the case that a new node was hot-added. In
3651 * this case vmalloc() will not be able to use this new node's
3652 * memory - this wait_table must be initialized to use this new
3653 * node itself as well.
3654 * To use this new node's memory, further consideration will be
3655 * necessary.
3656 */
3657 zone->wait_table = vmalloc(alloc_size);
3658 }
3659 if (!zone->wait_table)
3660 return -ENOMEM;
3661
3662 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3663 init_waitqueue_head(zone->wait_table + i);
3664
3665 return 0;
3666 }
3667
3668 static int __zone_pcp_update(void *data)
3669 {
3670 struct zone *zone = data;
3671 int cpu;
3672 unsigned long batch = zone_batchsize(zone), flags;
3673
3674 for_each_possible_cpu(cpu) {
3675 struct per_cpu_pageset *pset;
3676 struct per_cpu_pages *pcp;
3677
3678 pset = per_cpu_ptr(zone->pageset, cpu);
3679 pcp = &pset->pcp;
3680
3681 local_irq_save(flags);
3682 free_pcppages_bulk(zone, pcp->count, pcp);
3683 setup_pageset(pset, batch);
3684 local_irq_restore(flags);
3685 }
3686 return 0;
3687 }
3688
3689 void zone_pcp_update(struct zone *zone)
3690 {
3691 stop_machine(__zone_pcp_update, zone, NULL);
3692 }
3693
3694 static __meminit void zone_pcp_init(struct zone *zone)
3695 {
3696 /*
3697 * per cpu subsystem is not up at this point. The following code
3698 * relies on the ability of the linker to provide the
3699 * offset of a (static) per cpu variable into the per cpu area.
3700 */
3701 zone->pageset = &boot_pageset;
3702
3703 if (zone->present_pages)
3704 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
3705 zone->name, zone->present_pages,
3706 zone_batchsize(zone));
3707 }
3708
3709 __meminit int init_currently_empty_zone(struct zone *zone,
3710 unsigned long zone_start_pfn,
3711 unsigned long size,
3712 enum memmap_context context)
3713 {
3714 struct pglist_data *pgdat = zone->zone_pgdat;
3715 int ret;
3716 ret = zone_wait_table_init(zone, size);
3717 if (ret)
3718 return ret;
3719 pgdat->nr_zones = zone_idx(zone) + 1;
3720
3721 zone->zone_start_pfn = zone_start_pfn;
3722
3723 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3724 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3725 pgdat->node_id,
3726 (unsigned long)zone_idx(zone),
3727 zone_start_pfn, (zone_start_pfn + size));
3728
3729 zone_init_free_lists(zone);
3730
3731 return 0;
3732 }
3733
3734 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3735 /*
3736 * Basic iterator support. Return the first range of PFNs for a node
3737 * Note: nid == MAX_NUMNODES returns first region regardless of node
3738 */
3739 static int __meminit first_active_region_index_in_nid(int nid)
3740 {
3741 int i;
3742
3743 for (i = 0; i < nr_nodemap_entries; i++)
3744 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3745 return i;
3746
3747 return -1;
3748 }
3749
3750 /*
3751 * Basic iterator support. Return the next active range of PFNs for a node
3752 * Note: nid == MAX_NUMNODES returns next region regardless of node
3753 */
3754 static int __meminit next_active_region_index_in_nid(int index, int nid)
3755 {
3756 for (index = index + 1; index < nr_nodemap_entries; index++)
3757 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3758 return index;
3759
3760 return -1;
3761 }
3762
3763 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3764 /*
3765 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3766 * Architectures may implement their own version but if add_active_range()
3767 * was used and there are no special requirements, this is a convenient
3768 * alternative
3769 */
3770 int __meminit __early_pfn_to_nid(unsigned long pfn)
3771 {
3772 int i;
3773
3774 for (i = 0; i < nr_nodemap_entries; i++) {
3775 unsigned long start_pfn = early_node_map[i].start_pfn;
3776 unsigned long end_pfn = early_node_map[i].end_pfn;
3777
3778 if (start_pfn <= pfn && pfn < end_pfn)
3779 return early_node_map[i].nid;
3780 }
3781 /* This is a memory hole */
3782 return -1;
3783 }
3784 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3785
3786 int __meminit early_pfn_to_nid(unsigned long pfn)
3787 {
3788 int nid;
3789
3790 nid = __early_pfn_to_nid(pfn);
3791 if (nid >= 0)
3792 return nid;
3793 /* just returns 0 */
3794 return 0;
3795 }
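
/*
 * For example, an early caller that only needs a best-effort node for
 * a pfn (and can tolerate the node-0 fallback for memory holes) might
 * do something like:
 *
 *	int nid = early_pfn_to_nid(pfn);
 *	... size or account the allocation against NODE_DATA(nid) ...
 */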
3796
3797 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
3798 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3799 {
3800 int nid;
3801
3802 nid = __early_pfn_to_nid(pfn);
3803 if (nid >= 0 && nid != node)
3804 return false;
3805 return true;
3806 }
3807 #endif
3808
3809 /* Basic iterator support to walk early_node_map[] */
3810 #define for_each_active_range_index_in_nid(i, nid) \
3811 for (i = first_active_region_index_in_nid(nid); i != -1; \
3812 i = next_active_region_index_in_nid(i, nid))
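
/*
 * For example, walking every registered range on node 0 could look
 * like this (illustrative only):
 *
 *	int i;
 *	for_each_active_range_index_in_nid(i, 0)
 *		printk(KERN_DEBUG "range %d: %#lx -> %#lx\n", i,
 *		       early_node_map[i].start_pfn,
 *		       early_node_map[i].end_pfn);
 */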
3813
3814 /**
3815 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3816 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3817 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3818 *
3819 * If an architecture guarantees that all ranges registered with
3820 * add_active_ranges() contain no holes and may be freed, this
3821 * function may be used instead of calling free_bootmem() manually.
3822 */
3823 void __init free_bootmem_with_active_regions(int nid,
3824 unsigned long max_low_pfn)
3825 {
3826 int i;
3827
3828 for_each_active_range_index_in_nid(i, nid) {
3829 unsigned long size_pages = 0;
3830 unsigned long end_pfn = early_node_map[i].end_pfn;
3831
3832 if (early_node_map[i].start_pfn >= max_low_pfn)
3833 continue;
3834
3835 if (end_pfn > max_low_pfn)
3836 end_pfn = max_low_pfn;
3837
3838 size_pages = end_pfn - early_node_map[i].start_pfn;
3839 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3840 PFN_PHYS(early_node_map[i].start_pfn),
3841 size_pages << PAGE_SHIFT);
3842 }
3843 }
3844
3845 #ifdef CONFIG_HAVE_MEMBLOCK
3846 /*
3847 * Basic iterator support. Return the last range of PFNs for a node
3848 * Note: nid == MAX_NUMNODES returns last region regardless of node
3849 */
3850 static int __meminit last_active_region_index_in_nid(int nid)
3851 {
3852 int i;
3853
3854 for (i = nr_nodemap_entries - 1; i >= 0; i--)
3855 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3856 return i;
3857
3858 return -1;
3859 }
3860
3861 /*
3862 * Basic iterator support. Return the previous active range of PFNs for a node
3863 * Note: nid == MAX_NUMNODES returns previous region regardless of node
3864 */
3865 static int __meminit previous_active_region_index_in_nid(int index, int nid)
3866 {
3867 for (index = index - 1; index >= 0; index--)
3868 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3869 return index;
3870
3871 return -1;
3872 }
3873
3874 #define for_each_active_range_index_in_nid_reverse(i, nid) \
3875 for (i = last_active_region_index_in_nid(nid); i != -1; \
3876 i = previous_active_region_index_in_nid(i, nid))
3877
3878 u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3879 u64 goal, u64 limit)
3880 {
3881 int i;
3882
3883 /* Need to go over early_node_map to find a good range for the node */
3884 for_each_active_range_index_in_nid_reverse(i, nid) {
3885 u64 addr;
3886 u64 ei_start, ei_last;
3887 u64 final_start, final_end;
3888
3889 ei_last = early_node_map[i].end_pfn;
3890 ei_last <<= PAGE_SHIFT;
3891 ei_start = early_node_map[i].start_pfn;
3892 ei_start <<= PAGE_SHIFT;
3893
3894 final_start = max(ei_start, goal);
3895 final_end = min(ei_last, limit);
3896
3897 if (final_start >= final_end)
3898 continue;
3899
3900 addr = memblock_find_in_range(final_start, final_end, size, align);
3901
3902 if (addr == MEMBLOCK_ERROR)
3903 continue;
3904
3905 return addr;
3906 }
3907
3908 return MEMBLOCK_ERROR;
3909 }
3910 #endif
3911
3912 int __init add_from_early_node_map(struct range *range, int az,
3913 int nr_range, int nid)
3914 {
3915 int i;
3916 u64 start, end;
3917
3918 /* need to go over early_node_map to find a good range for the node */
3919 for_each_active_range_index_in_nid(i, nid) {
3920 start = early_node_map[i].start_pfn;
3921 end = early_node_map[i].end_pfn;
3922 nr_range = add_range(range, az, nr_range, start, end);
3923 }
3924 return nr_range;
3925 }
3926
3927 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3928 {
3929 int i;
3930 int ret;
3931
3932 for_each_active_range_index_in_nid(i, nid) {
3933 ret = work_fn(early_node_map[i].start_pfn,
3934 early_node_map[i].end_pfn, data);
3935 if (ret)
3936 break;
3937 }
3938 }
3939 /**
3940 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3941 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3942 *
3943 * If an architecture guarantees that all ranges registered with
3944 * add_active_ranges() contain no holes and may be freed, this
3945 * function may be used instead of calling memory_present() manually.
3946 */
3947 void __init sparse_memory_present_with_active_regions(int nid)
3948 {
3949 int i;
3950
3951 for_each_active_range_index_in_nid(i, nid)
3952 memory_present(early_node_map[i].nid,
3953 early_node_map[i].start_pfn,
3954 early_node_map[i].end_pfn);
3955 }
3956
3957 /**
3958 * get_pfn_range_for_nid - Return the start and end page frames for a node
3959 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3960 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3961 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3962 *
3963 * It returns the start and end page frame of a node based on information
3964 * provided by an arch calling add_active_range(). If called for a node
3965 * with no available memory, a warning is printed and the start and end
3966 * PFNs will be 0.
3967 */
3968 void __meminit get_pfn_range_for_nid(unsigned int nid,
3969 unsigned long *start_pfn, unsigned long *end_pfn)
3970 {
3971 int i;
3972 *start_pfn = -1UL;
3973 *end_pfn = 0;
3974
3975 for_each_active_range_index_in_nid(i, nid) {
3976 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3977 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3978 }
3979
3980 if (*start_pfn == -1UL)
3981 *start_pfn = 0;
3982 }
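
/*
 * For example, a hypothetical caller sizing node 1 might do:
 *
 *	unsigned long start_pfn, end_pfn, spanned;
 *	get_pfn_range_for_nid(1, &start_pfn, &end_pfn);
 *	spanned = end_pfn - start_pfn;	(includes any holes)
 */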
3983
3984 /*
3985 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3986 * assumption is made that zones within a node are ordered in monotonic
3987 * increasing memory addresses so that the "highest" populated zone is used
3988 */
3989 static void __init find_usable_zone_for_movable(void)
3990 {
3991 int zone_index;
3992 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3993 if (zone_index == ZONE_MOVABLE)
3994 continue;
3995
3996 if (arch_zone_highest_possible_pfn[zone_index] >
3997 arch_zone_lowest_possible_pfn[zone_index])
3998 break;
3999 }
4000
4001 VM_BUG_ON(zone_index == -1);
4002 movable_zone = zone_index;
4003 }
4004
4005 /*
4006 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4007 * because it is sized independent of architecture. Unlike the other zones,
4008 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4009 * in each node depending on the size of each node and how evenly kernelcore
4010 * is distributed. This helper function adjusts the zone ranges
4011 * provided by the architecture for a given node by using the end of the
4012 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4013 * zones within a node are in order of monotonically increasing memory addresses.
4014 */
4015 static void __meminit adjust_zone_range_for_zone_movable(int nid,
4016 unsigned long zone_type,
4017 unsigned long node_start_pfn,
4018 unsigned long node_end_pfn,
4019 unsigned long *zone_start_pfn,
4020 unsigned long *zone_end_pfn)
4021 {
4022 /* Only adjust if ZONE_MOVABLE is on this node */
4023 if (zone_movable_pfn[nid]) {
4024 /* Size ZONE_MOVABLE */
4025 if (zone_type == ZONE_MOVABLE) {
4026 *zone_start_pfn = zone_movable_pfn[nid];
4027 *zone_end_pfn = min(node_end_pfn,
4028 arch_zone_highest_possible_pfn[movable_zone]);
4029
4030 /* Adjust for ZONE_MOVABLE starting within this range */
4031 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4032 *zone_end_pfn > zone_movable_pfn[nid]) {
4033 *zone_end_pfn = zone_movable_pfn[nid];
4034
4035 /* Check if this whole range is within ZONE_MOVABLE */
4036 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4037 *zone_start_pfn = *zone_end_pfn;
4038 }
4039 }
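
/*
 * Worked example: say a node spans pfns [4096, 1048576) and
 * zone_movable_pfn[nid] == 786432. For ZONE_NORMAL covering that node
 * the range is clipped to [4096, 786432); for ZONE_MOVABLE it becomes
 * [786432, 1048576), assuming the highest usable zone ends at or
 * beyond the node end. A zone lying entirely above 786432 collapses
 * to an empty range.
 */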
4040
4041 /*
4042 * Return the number of pages a zone spans in a node, including holes
4043 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4044 */
4045 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4046 unsigned long zone_type,
4047 unsigned long *ignored)
4048 {
4049 unsigned long node_start_pfn, node_end_pfn;
4050 unsigned long zone_start_pfn, zone_end_pfn;
4051
4052 /* Get the start and end of the node and zone */
4053 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4054 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4055 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4056 adjust_zone_range_for_zone_movable(nid, zone_type,
4057 node_start_pfn, node_end_pfn,
4058 &zone_start_pfn, &zone_end_pfn);
4059
4060 /* Check that this node has pages within the zone's required range */
4061 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4062 return 0;
4063
4064 /* Move the zone boundaries inside the node if necessary */
4065 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4066 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4067
4068 /* Return the spanned pages */
4069 return zone_end_pfn - zone_start_pfn;
4070 }
4071
4072 /*
4073 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4074 * then all holes in the requested range will be accounted for.
4075 */
4076 unsigned long __meminit __absent_pages_in_range(int nid,
4077 unsigned long range_start_pfn,
4078 unsigned long range_end_pfn)
4079 {
4080 int i = 0;
4081 unsigned long prev_end_pfn = 0, hole_pages = 0;
4082 unsigned long start_pfn;
4083
4084 /* Find the end_pfn of the first active range of pfns in the node */
4085 i = first_active_region_index_in_nid(nid);
4086 if (i == -1)
4087 return 0;
4088
4089 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4090
4091 /* Account for ranges before physical memory on this node */
4092 if (early_node_map[i].start_pfn > range_start_pfn)
4093 hole_pages = prev_end_pfn - range_start_pfn;
4094
4095 /* Find all holes for the zone within the node */
4096 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
4097
4098 /* No need to continue if prev_end_pfn is outside the zone */
4099 if (prev_end_pfn >= range_end_pfn)
4100 break;
4101
4102 /* Make sure the end of the zone is not within the hole */
4103 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
4104 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
4105
4106 /* Update the hole size count and move on */
4107 if (start_pfn > range_start_pfn) {
4108 BUG_ON(prev_end_pfn > start_pfn);
4109 hole_pages += start_pfn - prev_end_pfn;
4110 }
4111 prev_end_pfn = early_node_map[i].end_pfn;
4112 }
4113
4114 /* Account for ranges past physical memory on this node */
4115 if (range_end_pfn > prev_end_pfn)
4116 hole_pages += range_end_pfn -
4117 max(range_start_pfn, prev_end_pfn);
4118
4119 return hole_pages;
4120 }
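
/*
 * Worked example: with active ranges [100, 200) and [300, 400) on a
 * node, __absent_pages_in_range(nid, 0, 500) counts 100 pages before
 * the first range, 100 pages for the [200, 300) hole and 100 pages
 * past the last range, returning 300.
 */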
4121
4122 /**
4123 * absent_pages_in_range - Return number of page frames in holes within a range
4124 * @start_pfn: The start PFN to start searching for holes
4125 * @end_pfn: The end PFN to stop searching for holes
4126 *
4127 * It returns the number of page frames in memory holes within a range.
4128 */
4129 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4130 unsigned long end_pfn)
4131 {
4132 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4133 }
4134
4135 /* Return the number of page frames in holes in a zone on a node */
4136 static unsigned long __meminit zone_absent_pages_in_node(int nid,
4137 unsigned long zone_type,
4138 unsigned long *ignored)
4139 {
4140 unsigned long node_start_pfn, node_end_pfn;
4141 unsigned long zone_start_pfn, zone_end_pfn;
4142
4143 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4144 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
4145 node_start_pfn);
4146 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
4147 node_end_pfn);
4148
4149 adjust_zone_range_for_zone_movable(nid, zone_type,
4150 node_start_pfn, node_end_pfn,
4151 &zone_start_pfn, &zone_end_pfn);
4152 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4153 }
4154
4155 #else
4156 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4157 unsigned long zone_type,
4158 unsigned long *zones_size)
4159 {
4160 return zones_size[zone_type];
4161 }
4162
4163 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4164 unsigned long zone_type,
4165 unsigned long *zholes_size)
4166 {
4167 if (!zholes_size)
4168 return 0;
4169
4170 return zholes_size[zone_type];
4171 }
4172
4173 #endif
4174
4175 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4176 unsigned long *zones_size, unsigned long *zholes_size)
4177 {
4178 unsigned long realtotalpages, totalpages = 0;
4179 enum zone_type i;
4180
4181 for (i = 0; i < MAX_NR_ZONES; i++)
4182 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4183 zones_size);
4184 pgdat->node_spanned_pages = totalpages;
4185
4186 realtotalpages = totalpages;
4187 for (i = 0; i < MAX_NR_ZONES; i++)
4188 realtotalpages -=
4189 zone_absent_pages_in_node(pgdat->node_id, i,
4190 zholes_size);
4191 pgdat->node_present_pages = realtotalpages;
4192 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4193 realtotalpages);
4194 }
4195
4196 #ifndef CONFIG_SPARSEMEM
4197 /*
4198 * Calculate the size of the zone->blockflags bitmap, rounded up to an
4199 * unsigned long. Start by making sure zonesize is a multiple of
4200 * pageblock_order by rounding up. Then use NR_PAGEBLOCK_BITS bits per
4201 * pageblock, round the bit count up to the nearest unsigned long, and
4202 * return the result in bytes.
4203 */
4204 static unsigned long __init usemap_size(unsigned long zonesize)
4205 {
4206 unsigned long usemapsize;
4207
4208 usemapsize = roundup(zonesize, pageblock_nr_pages);
4209 usemapsize = usemapsize >> pageblock_order;
4210 usemapsize *= NR_PAGEBLOCK_BITS;
4211 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4212
4213 return usemapsize / 8;
4214 }
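
/*
 * Worked example (64-bit, pageblock_order == 10, NR_PAGEBLOCK_BITS ==
 * 4): a zonesize of 1048576 pages gives 1024 pageblocks and 4096 bits,
 * already a multiple of 64, so usemap_size() returns 4096 / 8 == 512
 * bytes.
 */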
4215
4216 static void __init setup_usemap(struct pglist_data *pgdat,
4217 struct zone *zone, unsigned long zonesize)
4218 {
4219 unsigned long usemapsize = usemap_size(zonesize);
4220 zone->pageblock_flags = NULL;
4221 if (usemapsize)
4222 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4223 usemapsize);
4224 }
4225 #else
4226 static inline void setup_usemap(struct pglist_data *pgdat,
4227 struct zone *zone, unsigned long zonesize) {}
4228 #endif /* CONFIG_SPARSEMEM */
4229
4230 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4231
4232 /* Return a sensible default order for the pageblock size. */
4233 static inline int pageblock_default_order(void)
4234 {
4235 if (HPAGE_SHIFT > PAGE_SHIFT)
4236 return HUGETLB_PAGE_ORDER;
4237
4238 return MAX_ORDER-1;
4239 }
4240
4241 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4242 static inline void __init set_pageblock_order(unsigned int order)
4243 {
4244 /* Check that pageblock_nr_pages has not already been set up */
4245 if (pageblock_order)
4246 return;
4247
4248 /*
4249 * Assume the largest contiguous order of interest is a huge page.
4250 * This value may be variable depending on boot parameters on IA64
4251 */
4252 pageblock_order = order;
4253 }
4254 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4255
4256 /*
4257 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4258 * and pageblock_default_order() are unused as pageblock_order is set
4259 * at compile-time. See include/linux/pageblock-flags.h for the values of
4260 * pageblock_order based on the kernel config
4261 */
4262 static inline int pageblock_default_order(unsigned int order)
4263 {
4264 return MAX_ORDER-1;
4265 }
4266 #define set_pageblock_order(x) do {} while (0)
4267
4268 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4269
4270 /*
4271 * Set up the zone data structures:
4272 * - mark all pages reserved
4273 * - mark all memory queues empty
4274 * - clear the memory bitmaps
4275 */
4276 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4277 unsigned long *zones_size, unsigned long *zholes_size)
4278 {
4279 enum zone_type j;
4280 int nid = pgdat->node_id;
4281 unsigned long zone_start_pfn = pgdat->node_start_pfn;
4282 int ret;
4283
4284 pgdat_resize_init(pgdat);
4285 pgdat->nr_zones = 0;
4286 init_waitqueue_head(&pgdat->kswapd_wait);
4287 pgdat->kswapd_max_order = 0;
4288 pgdat_page_cgroup_init(pgdat);
4289
4290 for (j = 0; j < MAX_NR_ZONES; j++) {
4291 struct zone *zone = pgdat->node_zones + j;
4292 unsigned long size, realsize, memmap_pages;
4293 enum lru_list l;
4294
4295 size = zone_spanned_pages_in_node(nid, j, zones_size);
4296 realsize = size - zone_absent_pages_in_node(nid, j,
4297 zholes_size);
4298
4299 /*
4300 * Adjust realsize so that it accounts for how much memory
4301 * is used by this zone for memmap. This affects the watermark
4302 * and per-cpu initialisations
4303 */
4304 memmap_pages =
4305 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4306 if (realsize >= memmap_pages) {
4307 realsize -= memmap_pages;
4308 if (memmap_pages)
4309 printk(KERN_DEBUG
4310 " %s zone: %lu pages used for memmap\n",
4311 zone_names[j], memmap_pages);
4312 } else
4313 printk(KERN_WARNING
4314 " %s zone: %lu pages exceeds realsize %lu\n",
4315 zone_names[j], memmap_pages, realsize);
4316
4317 /* Account for reserved pages */
4318 if (j == 0 && realsize > dma_reserve) {
4319 realsize -= dma_reserve;
4320 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
4321 zone_names[0], dma_reserve);
4322 }
4323
4324 if (!is_highmem_idx(j))
4325 nr_kernel_pages += realsize;
4326 nr_all_pages += realsize;
4327
4328 zone->spanned_pages = size;
4329 zone->present_pages = realsize;
4330 #ifdef CONFIG_NUMA
4331 zone->node = nid;
4332 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4333 / 100;
4334 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4335 #endif
4336 zone->name = zone_names[j];
4337 spin_lock_init(&zone->lock);
4338 spin_lock_init(&zone->lru_lock);
4339 zone_seqlock_init(zone);
4340 zone->zone_pgdat = pgdat;
4341
4342 zone_pcp_init(zone);
4343 for_each_lru(l)
4344 INIT_LIST_HEAD(&zone->lru[l].list);
4345 zone->reclaim_stat.recent_rotated[0] = 0;
4346 zone->reclaim_stat.recent_rotated[1] = 0;
4347 zone->reclaim_stat.recent_scanned[0] = 0;
4348 zone->reclaim_stat.recent_scanned[1] = 0;
4349 zap_zone_vm_stats(zone);
4350 zone->flags = 0;
4351 if (!size)
4352 continue;
4353
4354 set_pageblock_order(pageblock_default_order());
4355 setup_usemap(pgdat, zone, size);
4356 ret = init_currently_empty_zone(zone, zone_start_pfn,
4357 size, MEMMAP_EARLY);
4358 BUG_ON(ret);
4359 memmap_init(size, nid, j, zone_start_pfn);
4360 zone_start_pfn += size;
4361 }
4362 }
4363
4364 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4365 {
4366 /* Skip empty nodes */
4367 if (!pgdat->node_spanned_pages)
4368 return;
4369
4370 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4371 /* ia64 gets its own node_mem_map, before this, without bootmem */
4372 if (!pgdat->node_mem_map) {
4373 unsigned long size, start, end;
4374 struct page *map;
4375
4376 /*
4377 * The zone's endpoints aren't required to be MAX_ORDER
4378 * aligned but the node_mem_map endpoints must be in order
4379 * for the buddy allocator to function correctly.
4380 */
4381 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4382 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4383 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4384 size = (end - start) * sizeof(struct page);
4385 map = alloc_remap(pgdat->node_id, size);
4386 if (!map)
4387 map = alloc_bootmem_node_nopanic(pgdat, size);
4388 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4389 }
4390 #ifndef CONFIG_NEED_MULTIPLE_NODES
4391 /*
4392 * With no DISCONTIG, the global mem_map is just set as node 0's
4393 */
4394 if (pgdat == NODE_DATA(0)) {
4395 mem_map = NODE_DATA(0)->node_mem_map;
4396 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4397 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4398 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4399 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4400 }
4401 #endif
4402 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
4403 }
4404
4405 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4406 unsigned long node_start_pfn, unsigned long *zholes_size)
4407 {
4408 pg_data_t *pgdat = NODE_DATA(nid);
4409
4410 pgdat->node_id = nid;
4411 pgdat->node_start_pfn = node_start_pfn;
4412 calculate_node_totalpages(pgdat, zones_size, zholes_size);
4413
4414 alloc_node_mem_map(pgdat);
4415 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4416 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4417 nid, (unsigned long)pgdat,
4418 (unsigned long)pgdat->node_mem_map);
4419 #endif
4420
4421 free_area_init_core(pgdat, zones_size, zholes_size);
4422 }
4423
4424 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4425
4426 #if MAX_NUMNODES > 1
4427 /*
4428 * Figure out the number of possible node ids.
4429 */
4430 static void __init setup_nr_node_ids(void)
4431 {
4432 unsigned int node;
4433 unsigned int highest = 0;
4434
4435 for_each_node_mask(node, node_possible_map)
4436 highest = node;
4437 nr_node_ids = highest + 1;
4438 }
4439 #else
4440 static inline void setup_nr_node_ids(void)
4441 {
4442 }
4443 #endif
4444
4445 /**
4446 * add_active_range - Register a range of PFNs backed by physical memory
4447 * @nid: The node ID the range resides on
4448 * @start_pfn: The start PFN of the available physical memory
4449 * @end_pfn: The end PFN of the available physical memory
4450 *
4451 * These ranges are stored in an early_node_map[] and later used by
4452 * free_area_init_nodes() to calculate zone sizes and holes. If the
4453 * range spans a memory hole, it is up to the architecture to ensure
4454 * the memory is not freed by the bootmem allocator. If possible
4455 * the range being registered will be merged with existing ranges.
4456 */
4457 void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4458 unsigned long end_pfn)
4459 {
4460 int i;
4461
4462 mminit_dprintk(MMINIT_TRACE, "memory_register",
4463 "Entering add_active_range(%d, %#lx, %#lx) "
4464 "%d entries of %d used\n",
4465 nid, start_pfn, end_pfn,
4466 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
4467
4468 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4469
4470 /* Merge with existing active regions if possible */
4471 for (i = 0; i < nr_nodemap_entries; i++) {
4472 if (early_node_map[i].nid != nid)
4473 continue;
4474
4475 /* Skip if an existing region covers this new one */
4476 if (start_pfn >= early_node_map[i].start_pfn &&
4477 end_pfn <= early_node_map[i].end_pfn)
4478 return;
4479
4480 /* Merge forward if suitable */
4481 if (start_pfn <= early_node_map[i].end_pfn &&
4482 end_pfn > early_node_map[i].end_pfn) {
4483 early_node_map[i].end_pfn = end_pfn;
4484 return;
4485 }
4486
4487 /* Merge backward if suitable */
4488 if (start_pfn < early_node_map[i].start_pfn &&
4489 end_pfn >= early_node_map[i].start_pfn) {
4490 early_node_map[i].start_pfn = start_pfn;
4491 return;
4492 }
4493 }
4494
4495 /* Check that early_node_map is large enough */
4496 if (i >= MAX_ACTIVE_REGIONS) {
4497 printk(KERN_CRIT "More than %d memory regions, truncating\n",
4498 MAX_ACTIVE_REGIONS);
4499 return;
4500 }
4501
4502 early_node_map[i].nid = nid;
4503 early_node_map[i].start_pfn = start_pfn;
4504 early_node_map[i].end_pfn = end_pfn;
4505 nr_nodemap_entries = i + 1;
4506 }
4507
4508 /**
4509 * remove_active_range - Shrink an existing registered range of PFNs
4510 * @nid: The node id the range is on that should be shrunk
4511 * @start_pfn: The new start PFN of the range
4512 * @end_pfn: The new end PFN of the range
4513 *
4514 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
4515 * The map is kept near the end of the physical page range that has already been
4516 * registered. This function allows an arch to shrink an existing registered
4517 * range.
4518 */
4519 void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4520 unsigned long end_pfn)
4521 {
4522 int i, j;
4523 int removed = 0;
4524
4525 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4526 nid, start_pfn, end_pfn);
4527
4528 /* Find the old active region end and shrink */
4529 for_each_active_range_index_in_nid(i, nid) {
4530 if (early_node_map[i].start_pfn >= start_pfn &&
4531 early_node_map[i].end_pfn <= end_pfn) {
4532 /* clear it */
4533 early_node_map[i].start_pfn = 0;
4534 early_node_map[i].end_pfn = 0;
4535 removed = 1;
4536 continue;
4537 }
4538 if (early_node_map[i].start_pfn < start_pfn &&
4539 early_node_map[i].end_pfn > start_pfn) {
4540 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4541 early_node_map[i].end_pfn = start_pfn;
4542 if (temp_end_pfn > end_pfn)
4543 add_active_range(nid, end_pfn, temp_end_pfn);
4544 continue;
4545 }
4546 if (early_node_map[i].start_pfn >= start_pfn &&
4547 early_node_map[i].end_pfn > end_pfn &&
4548 early_node_map[i].start_pfn < end_pfn) {
4549 early_node_map[i].start_pfn = end_pfn;
4550 continue;
4551 }
4552 }
4553
4554 if (!removed)
4555 return;
4556
4557 /* remove the blank ones */
4558 for (i = nr_nodemap_entries - 1; i > 0; i--) {
4559 if (early_node_map[i].nid != nid)
4560 continue;
4561 if (early_node_map[i].end_pfn)
4562 continue;
4563 /* we found it, get rid of it */
4564 for (j = i; j < nr_nodemap_entries - 1; j++)
4565 memcpy(&early_node_map[j], &early_node_map[j+1],
4566 sizeof(early_node_map[j]));
4567 j = nr_nodemap_entries - 1;
4568 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4569 nr_nodemap_entries--;
4570 }
4571 }
4572
4573 /**
4574 * remove_all_active_ranges - Remove all currently registered regions
4575 *
4576 * During discovery, it may be found that a table like SRAT is invalid
4577 * and an alternative discovery method must be used. This function removes
4578 * all currently registered regions.
4579 */
4580 void __init remove_all_active_ranges(void)
4581 {
4582 memset(early_node_map, 0, sizeof(early_node_map));
4583 nr_nodemap_entries = 0;
4584 }
4585
4586 /* Compare two active node_active_regions */
4587 static int __init cmp_node_active_region(const void *a, const void *b)
4588 {
4589 struct node_active_region *arange = (struct node_active_region *)a;
4590 struct node_active_region *brange = (struct node_active_region *)b;
4591
4592 /* Done this way to avoid overflows */
4593 if (arange->start_pfn > brange->start_pfn)
4594 return 1;
4595 if (arange->start_pfn < brange->start_pfn)
4596 return -1;
4597
4598 return 0;
4599 }
4600
4601 /* sort the node_map by start_pfn */
4602 void __init sort_node_map(void)
4603 {
4604 sort(early_node_map, (size_t)nr_nodemap_entries,
4605 sizeof(struct node_active_region),
4606 cmp_node_active_region, NULL);
4607 }
4608
4609 /**
4610 * node_map_pfn_alignment - determine the maximum internode alignment
4611 *
4612 * This function should be called after node map is populated and sorted.
4613 * It calculates the maximum power of two alignment which can distinguish
4614 * all the nodes.
4615 *
4616 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4617 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
4618 * nodes are shifted by 256MiB, 256MiB is returned instead. Note that if
4619 * only the last node is shifted, 1GiB is enough and this function will indicate so.
4620 *
4621 * This is used to test whether pfn -> nid mapping of the chosen memory
4622 * model has fine enough granularity to avoid incorrect mapping for the
4623 * populated node map.
4624 *
4625 * Returns the determined alignment in PFNs. 0 if there is no alignment
4626 * requirement (single node).
4627 */
4628 unsigned long __init node_map_pfn_alignment(void)
4629 {
4630 unsigned long accl_mask = 0, last_end = 0;
4631 int last_nid = -1;
4632 int i;
4633
4634 for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
4635 int nid = early_node_map[i].nid;
4636 unsigned long start = early_node_map[i].start_pfn;
4637 unsigned long end = early_node_map[i].end_pfn;
4638 unsigned long mask;
4639
4640 if (!start || last_nid < 0 || last_nid == nid) {
4641 last_nid = nid;
4642 last_end = end;
4643 continue;
4644 }
4645
4646 /*
4647 * Start with a mask granular enough to pin-point to the
4648 * start pfn and tick off bits one-by-one until it becomes
4649 * too coarse to separate the current node from the last.
4650 */
4651 mask = ~((1 << __ffs(start)) - 1);
4652 while (mask && last_end <= (start & (mask << 1)))
4653 mask <<= 1;
4654
4655 /* accumulate all internode masks */
4656 accl_mask |= mask;
4657 }
4658
4659 /* convert mask to number of pages */
4660 return ~accl_mask + 1;
4661 }
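
/*
 * Worked example: node 0 ends at pfn 0x10000 and node 1 starts at
 * 0x14000. The initial mask is ~((1 << __ffs(0x14000)) - 1) ==
 * ~0x3fff; it widens twice, since last_end == 0x10000 still fits
 * under start & (mask << 1), and stops at ~0xffff, so the function
 * returns 0x10000 pfns (256MiB with 4KiB pages).
 */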
4662
4663 /* Find the lowest pfn for a node */
4664 static unsigned long __init find_min_pfn_for_node(int nid)
4665 {
4666 int i;
4667 unsigned long min_pfn = ULONG_MAX;
4668
4669 /* Assuming a sorted map, the first range found has the starting pfn */
4670 for_each_active_range_index_in_nid(i, nid)
4671 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4672
4673 if (min_pfn == ULONG_MAX) {
4674 printk(KERN_WARNING
4675 "Could not find start_pfn for node %d\n", nid);
4676 return 0;
4677 }
4678
4679 return min_pfn;
4680 }
4681
4682 /**
4683 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4684 *
4685 * It returns the minimum PFN based on information provided via
4686 * add_active_range().
4687 */
4688 unsigned long __init find_min_pfn_with_active_regions(void)
4689 {
4690 return find_min_pfn_for_node(MAX_NUMNODES);
4691 }
4692
4693 /*
4694 * early_calculate_totalpages()
4695 * Sum pages in active regions for movable zone.
4696 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4697 */
4698 static unsigned long __init early_calculate_totalpages(void)
4699 {
4700 int i;
4701 unsigned long totalpages = 0;
4702
4703 for (i = 0; i < nr_nodemap_entries; i++) {
4704 unsigned long pages = early_node_map[i].end_pfn -
4705 early_node_map[i].start_pfn;
4706 totalpages += pages;
4707 if (pages)
4708 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4709 }
4710 return totalpages;
4711 }
4712
4713 /*
4714 * Find the PFN the Movable zone begins in each node. Kernel memory
4715 * is spread evenly between nodes as long as the nodes have enough
4716 * memory. When they don't, some nodes will have more kernelcore than
4717 * others.
4718 */
4719 static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4720 {
4721 int i, nid;
4722 unsigned long usable_startpfn;
4723 unsigned long kernelcore_node, kernelcore_remaining;
4724 /* save the state before borrow the nodemask */
4725 nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4726 unsigned long totalpages = early_calculate_totalpages();
4727 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4728
4729 /*
4730 * If movablecore was specified, calculate what size of
4731 * kernelcore that corresponds so that memory usable for
4732 * any allocation type is evenly spread. If both kernelcore
4733 * and movablecore are specified, then the value of kernelcore
4734 * will be used for required_kernelcore if it's greater than
4735 * what movablecore would have allowed.
4736 */
4737 if (required_movablecore) {
4738 unsigned long corepages;
4739
4740 /*
4741 * Round-up so that ZONE_MOVABLE is at least as large as what
4742 * was requested by the user
4743 */
4744 required_movablecore =
4745 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4746 corepages = totalpages - required_movablecore;
4747
4748 required_kernelcore = max(required_kernelcore, corepages);
4749 }
4750
4751 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4752 if (!required_kernelcore)
4753 goto out;
4754
4755 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4756 find_usable_zone_for_movable();
4757 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4758
4759 restart:
4760 /* Spread kernelcore memory as evenly as possible throughout nodes */
4761 kernelcore_node = required_kernelcore / usable_nodes;
4762 for_each_node_state(nid, N_HIGH_MEMORY) {
4763 /*
4764 * Recalculate kernelcore_node if the division per node
4765 * now exceeds what is necessary to satisfy the requested
4766 * amount of memory for the kernel
4767 */
4768 if (required_kernelcore < kernelcore_node)
4769 kernelcore_node = required_kernelcore / usable_nodes;
4770
4771 /*
4772 * As the map is walked, we track how much memory is usable
4773 * by the kernel using kernelcore_remaining. When it is
4774 * 0, the rest of the node is usable by ZONE_MOVABLE
4775 */
4776 kernelcore_remaining = kernelcore_node;
4777
4778 /* Go through each range of PFNs within this node */
4779 for_each_active_range_index_in_nid(i, nid) {
4780 unsigned long start_pfn, end_pfn;
4781 unsigned long size_pages;
4782
4783 start_pfn = max(early_node_map[i].start_pfn,
4784 zone_movable_pfn[nid]);
4785 end_pfn = early_node_map[i].end_pfn;
4786 if (start_pfn >= end_pfn)
4787 continue;
4788
4789 /* Account for what is only usable for kernelcore */
4790 if (start_pfn < usable_startpfn) {
4791 unsigned long kernel_pages;
4792 kernel_pages = min(end_pfn, usable_startpfn)
4793 - start_pfn;
4794
4795 kernelcore_remaining -= min(kernel_pages,
4796 kernelcore_remaining);
4797 required_kernelcore -= min(kernel_pages,
4798 required_kernelcore);
4799
4800 /* Continue if range is now fully accounted */
4801 if (end_pfn <= usable_startpfn) {
4802
4803 /*
4804 * Push zone_movable_pfn to the end so
4805 * that if we have to rebalance
4806 * kernelcore across nodes, we will
4807 * not double account here
4808 */
4809 zone_movable_pfn[nid] = end_pfn;
4810 continue;
4811 }
4812 start_pfn = usable_startpfn;
4813 }
4814
4815 /*
4816 * The usable PFN range for ZONE_MOVABLE is from
4817 * start_pfn->end_pfn. Calculate size_pages as the
4818 * number of pages used as kernelcore
4819 */
4820 size_pages = end_pfn - start_pfn;
4821 if (size_pages > kernelcore_remaining)
4822 size_pages = kernelcore_remaining;
4823 zone_movable_pfn[nid] = start_pfn + size_pages;
4824
4825 /*
4826 * Some kernelcore has been met, update counts and
4827 * break if the kernelcore for this node has been
4828 * satisfied
4829 */
4830 required_kernelcore -= min(required_kernelcore,
4831 size_pages);
4832 kernelcore_remaining -= size_pages;
4833 if (!kernelcore_remaining)
4834 break;
4835 }
4836 }
4837
4838 /*
4839 * If there is still required_kernelcore, we do another pass with one
4840 * less node in the count. This will push zone_movable_pfn[nid] further
4841 * along on the nodes that still have memory until kernelcore is
4842 * satisified
4843 */
4844 usable_nodes--;
4845 if (usable_nodes && required_kernelcore > usable_nodes)
4846 goto restart;
4847
4848 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4849 for (nid = 0; nid < MAX_NUMNODES; nid++)
4850 zone_movable_pfn[nid] =
4851 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4852
4853 out:
4854 /* restore the node_state */
4855 node_states[N_HIGH_MEMORY] = saved_node_state;
4856 }
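
/*
 * Rough example: booting with kernelcore=2G on two nodes of 2GiB each
 * spreads 1GiB of kernelcore to each node, so zone_movable_pfn[] ends
 * up pointing 1GiB into each node and the upper halves become
 * ZONE_MOVABLE (modulo MAX_ORDER_NR_PAGES alignment).
 */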
4857
4858 /* Any regular memory on that node? */
4859 static void check_for_regular_memory(pg_data_t *pgdat)
4860 {
4861 #ifdef CONFIG_HIGHMEM
4862 enum zone_type zone_type;
4863
4864 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4865 struct zone *zone = &pgdat->node_zones[zone_type];
4866 if (zone->present_pages)
4867 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4868 }
4869 #endif
4870 }
4871
4872 /**
4873 * free_area_init_nodes - Initialise all pg_data_t and zone data
4874 * @max_zone_pfn: an array of max PFNs for each zone
4875 *
4876 * This will call free_area_init_node() for each active node in the system.
4877 * Using the page ranges provided by add_active_range(), the size of each
4878 * zone in each node and their holes is calculated. If the maximum PFN
4879 * between two adjacent zones match, it is assumed that the zone is empty.
4880 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4881 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4882 * starts where the previous one ended. For example, ZONE_DMA32 starts
4883 * at arch_max_dma_pfn.
4884 */
4885 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4886 {
4887 unsigned long nid;
4888 int i;
4889
4890 /* Sort early_node_map as initialisation assumes it is sorted */
4891 sort_node_map();
4892
4893 /* Record where the zone boundaries are */
4894 memset(arch_zone_lowest_possible_pfn, 0,
4895 sizeof(arch_zone_lowest_possible_pfn));
4896 memset(arch_zone_highest_possible_pfn, 0,
4897 sizeof(arch_zone_highest_possible_pfn));
4898 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4899 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4900 for (i = 1; i < MAX_NR_ZONES; i++) {
4901 if (i == ZONE_MOVABLE)
4902 continue;
4903 arch_zone_lowest_possible_pfn[i] =
4904 arch_zone_highest_possible_pfn[i-1];
4905 arch_zone_highest_possible_pfn[i] =
4906 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4907 }
4908 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4909 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4910
4911 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4912 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4913 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4914
4915 /* Print out the zone ranges */
4916 printk("Zone PFN ranges:\n");
4917 for (i = 0; i < MAX_NR_ZONES; i++) {
4918 if (i == ZONE_MOVABLE)
4919 continue;
4920 printk(" %-8s ", zone_names[i]);
4921 if (arch_zone_lowest_possible_pfn[i] ==
4922 arch_zone_highest_possible_pfn[i])
4923 printk("empty\n");
4924 else
4925 printk("%0#10lx -> %0#10lx\n",
4926 arch_zone_lowest_possible_pfn[i],
4927 arch_zone_highest_possible_pfn[i]);
4928 }
4929
4930 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4931 printk("Movable zone start PFN for each node\n");
4932 for (i = 0; i < MAX_NUMNODES; i++) {
4933 if (zone_movable_pfn[i])
4934 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4935 }
4936
4937 /* Print out the early_node_map[] */
4938 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4939 for (i = 0; i < nr_nodemap_entries; i++)
4940 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4941 early_node_map[i].start_pfn,
4942 early_node_map[i].end_pfn);
4943
4944 /* Initialise every node */
4945 mminit_verify_pageflags_layout();
4946 setup_nr_node_ids();
4947 for_each_online_node(nid) {
4948 pg_data_t *pgdat = NODE_DATA(nid);
4949 free_area_init_node(nid, NULL,
4950 find_min_pfn_for_node(nid), NULL);
4951
4952 /* Any memory on that node */
4953 if (pgdat->node_present_pages)
4954 node_set_state(nid, N_HIGH_MEMORY);
4955 check_for_regular_memory(pgdat);
4956 }
4957 }
4958
4959 static int __init cmdline_parse_core(char *p, unsigned long *core)
4960 {
4961 unsigned long long coremem;
4962 if (!p)
4963 return -EINVAL;
4964
4965 coremem = memparse(p, &p);
4966 *core = coremem >> PAGE_SHIFT;
4967
4968 /* Paranoid check that UL is enough for the coremem value */
4969 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4970
4971 return 0;
4972 }
4973
4974 /*
4975 * kernelcore=size sets the amount of memory to use for allocations that
4976 * cannot be reclaimed or migrated.
4977 */
4978 static int __init cmdline_parse_kernelcore(char *p)
4979 {
4980 return cmdline_parse_core(p, &required_kernelcore);
4981 }
4982
4983 /*
4984 * movablecore=size sets the amount of memory to use for allocations that
4985 * can be reclaimed or migrated.
4986 */
4987 static int __init cmdline_parse_movablecore(char *p)
4988 {
4989 return cmdline_parse_core(p, &required_movablecore);
4990 }
4991
4992 early_param("kernelcore", cmdline_parse_kernelcore);
4993 early_param("movablecore", cmdline_parse_movablecore);
4994
4995 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4996
4997 /**
4998 * set_dma_reserve - set the specified number of pages reserved in the first zone
4999 * @new_dma_reserve: The number of pages to mark reserved
5000 *
5001 * The per-cpu batchsize and zone watermarks are determined by present_pages.
5002 * In the DMA zone, a significant percentage may be consumed by kernel image
5003 * and other unfreeable allocations which can skew the watermarks badly. This
5004 * function may optionally be used to account for unfreeable pages in the
5005 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5006 * smaller per-cpu batchsize.
5007 */
5008 void __init set_dma_reserve(unsigned long new_dma_reserve)
5009 {
5010 dma_reserve = new_dma_reserve;
5011 }
5012
5013 void __init free_area_init(unsigned long *zones_size)
5014 {
5015 free_area_init_node(0, zones_size,
5016 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5017 }
5018
5019 static int page_alloc_cpu_notify(struct notifier_block *self,
5020 unsigned long action, void *hcpu)
5021 {
5022 int cpu = (unsigned long)hcpu;
5023
5024 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
5025 drain_pages(cpu);
5026
5027 /*
5028 * Spill the event counters of the dead processor
5029 * into the current processors event counters.
5030 * This artificially elevates the count of the current
5031 * processor.
5032 */
5033 vm_events_fold_cpu(cpu);
5034
5035 /*
5036 * Zero the differential counters of the dead processor
5037 * so that the vm statistics are consistent.
5038 *
5039 * This is only okay since the processor is dead and cannot
5040 * race with what we are doing.
5041 */
5042 refresh_cpu_vm_stats(cpu);
5043 }
5044 return NOTIFY_OK;
5045 }
5046
5047 void __init page_alloc_init(void)
5048 {
5049 hotcpu_notifier(page_alloc_cpu_notify, 0);
5050 }
5051
5052 /*
5053 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5054 * or min_free_kbytes changes.
5055 */
5056 static void calculate_totalreserve_pages(void)
5057 {
5058 struct pglist_data *pgdat;
5059 unsigned long reserve_pages = 0;
5060 enum zone_type i, j;
5061
5062 for_each_online_pgdat(pgdat) {
5063 for (i = 0; i < MAX_NR_ZONES; i++) {
5064 struct zone *zone = pgdat->node_zones + i;
5065 unsigned long max = 0;
5066
5067 /* Find valid and maximum lowmem_reserve in the zone */
5068 for (j = i; j < MAX_NR_ZONES; j++) {
5069 if (zone->lowmem_reserve[j] > max)
5070 max = zone->lowmem_reserve[j];
5071 }
5072
5073 /* we treat the high watermark as reserved pages. */
5074 max += high_wmark_pages(zone);
5075
5076 if (max > zone->present_pages)
5077 max = zone->present_pages;
5078 reserve_pages += max;
5079 }
5080 }
5081 totalreserve_pages = reserve_pages;
5082 }
5083
5084 /*
5085 * setup_per_zone_lowmem_reserve - called whenever
5086 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
5087 * has a correct pages reserved value, so an adequate number of
5088 * pages are left in the zone after a successful __alloc_pages().
5089 */
5090 static void setup_per_zone_lowmem_reserve(void)
5091 {
5092 struct pglist_data *pgdat;
5093 enum zone_type j, idx;
5094
5095 for_each_online_pgdat(pgdat) {
5096 for (j = 0; j < MAX_NR_ZONES; j++) {
5097 struct zone *zone = pgdat->node_zones + j;
5098 unsigned long present_pages = zone->present_pages;
5099
5100 zone->lowmem_reserve[j] = 0;
5101
5102 idx = j;
5103 while (idx) {
5104 struct zone *lower_zone;
5105
5106 idx--;
5107
5108 if (sysctl_lowmem_reserve_ratio[idx] < 1)
5109 sysctl_lowmem_reserve_ratio[idx] = 1;
5110
5111 lower_zone = pgdat->node_zones + idx;
5112 lower_zone->lowmem_reserve[j] = present_pages /
5113 sysctl_lowmem_reserve_ratio[idx];
5114 present_pages += lower_zone->present_pages;
5115 }
5116 }
5117 }
5118
5119 /* update totalreserve_pages */
5120 calculate_totalreserve_pages();
5121 }
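
/*
 * Worked example (4KiB pages): with a 3GiB ZONE_NORMAL above ZONE_DMA32
 * and sysctl_lowmem_reserve_ratio[ZONE_DMA32] == 256, ZONE_DMA32 gets
 * lowmem_reserve[ZONE_NORMAL] = 786432 / 256 == 3072 pages, kept out of
 * reach of allocations that could equally well have been satisfied from
 * ZONE_NORMAL.
 */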
5122
5123 /**
5124 * setup_per_zone_wmarks - called when min_free_kbytes changes
5125 * or when memory is hot-{added|removed}
5126 *
5127 * Ensures that the watermark[min,low,high] values for each zone are set
5128 * correctly with respect to min_free_kbytes.
5129 */
5130 void setup_per_zone_wmarks(void)
5131 {
5132 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5133 unsigned long lowmem_pages = 0;
5134 struct zone *zone;
5135 unsigned long flags;
5136
5137 /* Calculate total number of !ZONE_HIGHMEM pages */
5138 for_each_zone(zone) {
5139 if (!is_highmem(zone))
5140 lowmem_pages += zone->present_pages;
5141 }
5142
5143 for_each_zone(zone) {
5144 u64 tmp;
5145
5146 spin_lock_irqsave(&zone->lock, flags);
5147 tmp = (u64)pages_min * zone->present_pages;
5148 do_div(tmp, lowmem_pages);
5149 if (is_highmem(zone)) {
5150 /*
5151 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5152 * need highmem pages, so cap pages_min to a small
5153 * value here.
5154 *
5155 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5156 * deltas control async page reclaim, and so should
5157 * not be capped for highmem.
5158 */
5159 int min_pages;
5160
5161 min_pages = zone->present_pages / 1024;
5162 if (min_pages < SWAP_CLUSTER_MAX)
5163 min_pages = SWAP_CLUSTER_MAX;
5164 if (min_pages > 128)
5165 min_pages = 128;
5166 zone->watermark[WMARK_MIN] = min_pages;
5167 } else {
5168 /*
5169 * If it's a lowmem zone, reserve a number of pages
5170 * proportionate to the zone's size.
5171 */
5172 zone->watermark[WMARK_MIN] = tmp;
5173 }
5174
5175 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
5176 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5177 setup_zone_migrate_reserve(zone);
5178 spin_unlock_irqrestore(&zone->lock, flags);
5179 }
5180
5181 /* update totalreserve_pages */
5182 calculate_totalreserve_pages();
5183 }
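
/*
 * Worked example (4KiB pages): min_free_kbytes == 4096 gives
 * pages_min == 1024. On a single-lowmem-zone machine tmp == 1024, so
 * that zone ends up with WMARK_MIN == 1024, WMARK_LOW == 1024 + 256
 * and WMARK_HIGH == 1024 + 512 pages.
 */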
5184
5185 /*
5186 * The inactive anon list should be small enough that the VM never has to
5187 * do too much work, but large enough that each inactive page has a chance
5188 * to be referenced again before it is swapped out.
5189 *
5190 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5191 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5192 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5193 * the anonymous pages are kept on the inactive list.
5194 *
5195 * total target max
5196 * memory ratio inactive anon
5197 * -------------------------------------
5198 * 10MB 1 5MB
5199 * 100MB 1 50MB
5200 * 1GB 3 250MB
5201 * 10GB 10 0.9GB
5202 * 100GB 31 3GB
5203 * 1TB 101 10GB
5204 * 10TB 320 32GB
5205 */
5206 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5207 {
5208 unsigned int gb, ratio;
5209
5210 /* Zone size in gigabytes */
5211 gb = zone->present_pages >> (30 - PAGE_SHIFT);
5212 if (gb)
5213 ratio = int_sqrt(10 * gb);
5214 else
5215 ratio = 1;
5216
5217 zone->inactive_ratio = ratio;
5218 }
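
/*
 * For example, a 4GiB zone has gb == 4, so ratio == int_sqrt(40) == 6:
 * roughly one in seven anonymous pages is kept on the inactive list,
 * consistent with the 1GB and 10GB rows of the table above.
 */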
5219
5220 static void __meminit setup_per_zone_inactive_ratio(void)
5221 {
5222 struct zone *zone;
5223
5224 for_each_zone(zone)
5225 calculate_zone_inactive_ratio(zone);
5226 }
5227
5228 /*
5229 * Initialise min_free_kbytes.
5230 *
5231 * For small machines we want it small (128k min). For large machines
5232 * we want it large (64MB max). But it is not linear, because network
5233 * bandwidth does not increase linearly with machine size. We use
5234 *
5235 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5236 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
5237 *
5238 * which yields
5239 *
5240 * 16MB: 512k
5241 * 32MB: 724k
5242 * 64MB: 1024k
5243 * 128MB: 1448k
5244 * 256MB: 2048k
5245 * 512MB: 2896k
5246 * 1024MB: 4096k
5247 * 2048MB: 5792k
5248 * 4096MB: 8192k
5249 * 8192MB: 11584k
5250 * 16384MB: 16384k
5251 */
5252 int __meminit init_per_zone_wmark_min(void)
5253 {
5254 unsigned long lowmem_kbytes;
5255
5256 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5257
5258 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5259 if (min_free_kbytes < 128)
5260 min_free_kbytes = 128;
5261 if (min_free_kbytes > 65536)
5262 min_free_kbytes = 65536;
5263 setup_per_zone_wmarks();
5264 refresh_zone_stat_thresholds();
5265 setup_per_zone_lowmem_reserve();
5266 setup_per_zone_inactive_ratio();
5267 return 0;
5268 }
5269 module_init(init_per_zone_wmark_min)
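
/*
 * Worked example: 2GiB of lowmem gives lowmem_kbytes == 2097152, so
 * min_free_kbytes = int_sqrt(2097152 * 16) == 5792, matching the
 * 2048MB row of the table above.
 */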
5270
5271 /*
5272 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5273 * that we can call two helper functions whenever min_free_kbytes
5274 * changes.
5275 */
5276 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
5277 void __user *buffer, size_t *length, loff_t *ppos)
5278 {
5279 proc_dointvec(table, write, buffer, length, ppos);
5280 if (write)
5281 setup_per_zone_wmarks();
5282 return 0;
5283 }
5284
5285 #ifdef CONFIG_NUMA
5286 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5287 void __user *buffer, size_t *length, loff_t *ppos)
5288 {
5289 struct zone *zone;
5290 int rc;
5291
5292 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5293 if (rc)
5294 return rc;
5295
5296 for_each_zone(zone)
5297 zone->min_unmapped_pages = (zone->present_pages *
5298 sysctl_min_unmapped_ratio) / 100;
5299 return 0;
5300 }
5301
5302 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5303 void __user *buffer, size_t *length, loff_t *ppos)
5304 {
5305 struct zone *zone;
5306 int rc;
5307
5308 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5309 if (rc)
5310 return rc;
5311
5312 for_each_zone(zone)
5313 zone->min_slab_pages = (zone->present_pages *
5314 sysctl_min_slab_ratio) / 100;
5315 return 0;
5316 }
5317 #endif
5318
5319 /*
5320 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5321 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5322 * whenever sysctl_lowmem_reserve_ratio changes.
5323 *
5324 * The reserve ratio obviously has absolutely no relation with the
5325 * minimum watermarks. The lowmem reserve ratio is only meaningful
5326 * as a function of the boot-time zone sizes.
5327 */
5328 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5329 void __user *buffer, size_t *length, loff_t *ppos)
5330 {
5331 proc_dointvec_minmax(table, write, buffer, length, ppos);
5332 setup_per_zone_lowmem_reserve();
5333 return 0;
5334 }
5335
5336 /*
5337 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5338 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
5339 * can have before it gets flushed back to the buddy allocator.
5340 */
5341
5342 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5343 void __user *buffer, size_t *length, loff_t *ppos)
5344 {
5345 struct zone *zone;
5346 unsigned int cpu;
5347 int ret;
5348
5349 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5350 if (!write || (ret == -EINVAL))
5351 return ret;
5352 for_each_populated_zone(zone) {
5353 for_each_possible_cpu(cpu) {
5354 unsigned long high;
5355 high = zone->present_pages / percpu_pagelist_fraction;
5356 setup_pagelist_highmark(
5357 per_cpu_ptr(zone->pageset, cpu), high);
5358 }
5359 }
5360 return 0;
5361 }
5362
5363 int hashdist = HASHDIST_DEFAULT;
5364
5365 #ifdef CONFIG_NUMA
5366 static int __init set_hashdist(char *str)
5367 {
5368 if (!str)
5369 return 0;
5370 hashdist = simple_strtoul(str, &str, 0);
5371 return 1;
5372 }
5373 __setup("hashdist=", set_hashdist);
5374 #endif
5375
5376 /*
5377 * allocate a large system hash table from bootmem
5378 * - it is assumed that the hash table must contain an exact power-of-2
5379 * quantity of entries
5380 * - limit is the number of hash buckets, not the total allocation size
5381 */
5382 void *__init alloc_large_system_hash(const char *tablename,
5383 unsigned long bucketsize,
5384 unsigned long numentries,
5385 int scale,
5386 int flags,
5387 unsigned int *_hash_shift,
5388 unsigned int *_hash_mask,
5389 unsigned long limit)
5390 {
5391 unsigned long long max = limit;
5392 unsigned long log2qty, size;
5393 void *table = NULL;
5394
5395 /* allow the kernel cmdline to have a say */
5396 if (!numentries) {
5397 /* round applicable memory size up to nearest megabyte */
5398 numentries = nr_kernel_pages;
5399 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5400 numentries >>= 20 - PAGE_SHIFT;
5401 numentries <<= 20 - PAGE_SHIFT;
5402
5403 /* limit to 1 bucket per 2^scale bytes of low memory */
5404 if (scale > PAGE_SHIFT)
5405 numentries >>= (scale - PAGE_SHIFT);
5406 else
5407 numentries <<= (PAGE_SHIFT - scale);
5408
5409 /* Make sure we've got at least a 0-order allocation. */
5410 if (unlikely(flags & HASH_SMALL)) {
5411 /* Makes no sense without HASH_EARLY */
5412 WARN_ON(!(flags & HASH_EARLY));
5413 if (!(numentries >> *_hash_shift)) {
5414 numentries = 1UL << *_hash_shift;
5415 BUG_ON(!numentries);
5416 }
5417 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5418 numentries = PAGE_SIZE / bucketsize;
5419 }
5420 numentries = roundup_pow_of_two(numentries);
5421
5422 /* limit allocation size to 1/16 total memory by default */
5423 if (max == 0) {
5424 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5425 do_div(max, bucketsize);
5426 }
5427
5428 if (numentries > max)
5429 numentries = max;
5430
5431 log2qty = ilog2(numentries);
5432
5433 do {
5434 size = bucketsize << log2qty;
5435 if (flags & HASH_EARLY)
5436 table = alloc_bootmem_nopanic(size);
5437 else if (hashdist)
5438 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5439 else {
			/*
			 * If bucketsize is not a power of two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() handles automatically.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
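
/*
 * Usage sketch (assumption: modeled on the dentry cache setup in
 * fs/dcache.c of the same kernel era; not part of this file):
 *
 *	dentry_hashtable =
 *		alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_bl_head),
 *					dhash_entries,
 *					13,
 *					HASH_EARLY,
 *					&d_hash_shift,
 *					&d_hash_mask,
 *					0);
 *
 * scale == 13 requests roughly one bucket per 8KB (2^13 bytes) of low
 * memory, and limit == 0 applies the default 1/16-of-memory cap above.
 */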

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - zone->zone_start_pfn;
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
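
/*
 * Worked example (illustrative; assumes pageblock_order == 10 and
 * NR_PAGEBLOCK_BITS == 3, i.e. just the three migratetype bits): a pfn
 * at offset 5000 into its zone (or section) falls in pageblock
 * 5000 >> 10 == 4, so its flag group starts at bit 4 * 3 == 12 of the
 * pageblock bitmap.
 */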

/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}
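
/*
 * Sketch of the common consumer (mirrors the accessor in
 * include/linux/pageblock-flags.h; shown here for illustration only):
 *
 *	get_pageblock_migratetype(page)
 *	    == get_pageblock_flags_group(page, PB_migrate, PB_migrate_end)
 *
 * i.e. a pageblock's migratetype is just one such group of bits.
 */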

/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON(pfn < zone->zone_start_pfn);
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}
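
/*
 * The setter counterpart (sketch, mirroring set_pageblock_migratetype()
 * defined earlier in this file):
 *
 *	set_pageblock_flags_group(page, (unsigned long)migratetype,
 *					PB_migrate, PB_migrate_end);
 */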

/*
 * These are helpers for the page isolation code; see also
 * mm/page_isolation.c. They set/clear a pageblock's type to/from
 * ISOLATE; the page allocator never allocates memory from an ISOLATE
 * pageblock.
 */

static bool
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
	unsigned long pfn, iter, found;
	/*
	 * Callers should run lru_add_drain_all() first to avoid stale
	 * LRU state. A ZONE_MOVABLE zone never contains immobile pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return true;

	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
		return true;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);
		if (!page_count(page)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}
		if (!PageLRU(page))
			found++;
		/*
		 * RECLAIMABLE pages would need checking too, but memory
		 * offline does not call shrink_slab() by itself yet;
		 * this still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0, so
		 * no further check is needed: this is a _used_,
		 * not-movable page.
		 *
		 * The problematic thing here is PG_reserved pages.
		 * PG_reserved is set for both memory hole pages and
		 * _used_ kernel pages at boot.
		 */
		if (found > count)
			return false;
	}
	return true;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone = page_zone(page);
	return __count_immobile_pages(zone, page, 0);
}

int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or are on the LRU, isolation can
	 * continue. Later, when for example the memory hotplug notifier
	 * runs, the pages reported as "can be isolated" should be
	 * isolated (freed) by the balloon driver through the memory
	 * notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug does not call shrink_slab() by itself
	 * yet, so we only check MOVABLE pages here.
	 */
	if (__count_immobile_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * "Immobile" means not-on-LRU pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the
	 * notifier, we will fail.
	 */

out:
	if (!ret) {
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		move_freepages_block(zone, page, MIGRATE_ISOLATE);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags;
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
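
/*
 * Sketch of the intended calling pattern (compare
 * start_isolate_page_range()/undo_isolate_page_range() in
 * mm/page_isolation.c; illustrative, not part of this file):
 *
 *	for (pfn = start; pfn < end; pfn += pageblock_nr_pages)
 *		if (set_migratetype_isolate(pfn_to_page(pfn)))
 *			goto undo;	/- -EBUSY: immobile pages found -/
 *	return 0;
 * undo:
 *	for (pfn = start; pfn < undo_pfn; pfn += pageblock_nr_pages)
 *		unset_migratetype_isolate(pfn_to_page(pfn));
 *	return -EBUSY;
 */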

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES,
				      - (1UL << order));
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#endif
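
/*
 * Worked example for the head computation above (illustrative): for
 * pfn 0x1234 at order 4, pfn & ((1 << 4) - 1) == 0x4, so page_head is
 * the page 4 pfns back, at the order-4-aligned pfn 0x1230. If that
 * head is PageBuddy at order >= 4, the page lies inside a free buddy
 * block.
 */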

static struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked,		"locked"	},
	{1UL << PG_error,		"error"		},
	{1UL << PG_referenced,		"referenced"	},
	{1UL << PG_uptodate,		"uptodate"	},
	{1UL << PG_dirty,		"dirty"		},
	{1UL << PG_lru,			"lru"		},
	{1UL << PG_active,		"active"	},
	{1UL << PG_slab,		"slab"		},
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
	{1UL << PG_arch_1,		"arch_1"	},
	{1UL << PG_reserved,		"reserved"	},
	{1UL << PG_private,		"private"	},
	{1UL << PG_private_2,		"private_2"	},
	{1UL << PG_writeback,		"writeback"	},
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{1UL << PG_head,		"head"		},
	{1UL << PG_tail,		"tail"		},
#else
	{1UL << PG_compound,		"compound"	},
#endif
	{1UL << PG_swapcache,		"swapcache"	},
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
	{1UL << PG_reclaim,		"reclaim"	},
	{1UL << PG_swapbacked,		"swapbacked"	},
	{1UL << PG_unevictable,		"unevictable"	},
#ifdef CONFIG_MMU
	{1UL << PG_mlocked,		"mlocked"	},
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached,		"uncached"	},
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison,		"hwpoison"	},
#endif
	{-1UL,				NULL		},
};

static void dump_page_flags(unsigned long flags)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	printk(KERN_ALERT "page flags: %#lx(", flags);

	/* remove zone id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; pageflag_names[i].name && flags; i++) {

		mask = pageflag_names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		printk("%s%s", delim, pageflag_names[i].name);
		delim = "|";
	}

	/* check for left over flags */
	if (flags)
		printk("%s%#lx", delim, flags);

	printk(")\n");
}
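
/*
 * Example output (illustrative): a flags word with only PG_lru (bit 5)
 * and PG_active (bit 6) set prints as
 *
 *	page flags: 0x60(lru|active)
 */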

void dump_page(struct page *page)
{
	printk(KERN_ALERT
	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
		page, atomic_read(&page->_count), page_mapcount(page),
		page->mapping, page->index);
	dump_page_flags(page->flags);
	mem_cgroup_print_bad_page(page);
}
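
/*
 * Typical use (illustrative sketch, not from the original source):
 * called from sanity-check paths when a page is in an unexpected
 * state, e.g.
 *
 *	if (unlikely(page_mapcount(page) < 0))
 *		dump_page(page);
 */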