Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_MMZONE_H |
2 | #define _LINUX_MMZONE_H | |
3 | ||
4 | #ifdef __KERNEL__ | |
5 | #ifndef __ASSEMBLY__ | |
6 | ||
1da177e4 LT |
7 | #include <linux/spinlock.h> |
8 | #include <linux/list.h> | |
9 | #include <linux/wait.h> | |
10 | #include <linux/cache.h> | |
11 | #include <linux/threads.h> | |
12 | #include <linux/numa.h> | |
13 | #include <linux/init.h> | |
bdc8cb98 | 14 | #include <linux/seqlock.h> |
8357f869 | 15 | #include <linux/nodemask.h> |
1da177e4 | 16 | #include <asm/atomic.h> |
93ff66bf | 17 | #include <asm/page.h> |
1da177e4 LT |
18 | |
19 | /* Free memory management - zoned buddy allocator. */ | |
20 | #ifndef CONFIG_FORCE_MAX_ZONEORDER | |
21 | #define MAX_ORDER 11 | |
22 | #else | |
23 | #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER | |
24 | #endif | |
e984bb43 | 25 | #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) |
1da177e4 LT |
26 | |
27 | struct free_area { | |
28 | struct list_head free_list; | |
29 | unsigned long nr_free; | |
30 | }; | |
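The buddy allocator keeps one free_area (a free list plus a free-block count) per order, for orders 0 through MAX_ORDER-1, so the largest block it ever hands out is MAX_ORDER_NR_PAGES = 2^(MAX_ORDER-1) pages. A small userspace sketch of that arithmetic; the default MAX_ORDER of 11 and the 4 KiB page size are assumptions for illustration, not something this header guarantees:

```c
#include <stdio.h>

#define MAX_ORDER 11                          /* default from mmzone.h */
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
#define PAGE_SIZE 4096UL                      /* assumption: 4 KiB pages */

int main(void)
{
    /* One free_area exists per order; order n holds blocks of 2^n pages. */
    for (int order = 0; order < MAX_ORDER; order++)
        printf("order %2d: blocks of %4d pages (%lu KiB)\n",
               order, 1 << order, (PAGE_SIZE << order) / 1024);

    /* Largest buddy block: 2^(MAX_ORDER-1) pages. */
    printf("MAX_ORDER_NR_PAGES = %d pages = %lu MiB\n",
           MAX_ORDER_NR_PAGES, (PAGE_SIZE * MAX_ORDER_NR_PAGES) >> 20);
    return 0;
}
```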
31 | ||
32 | struct pglist_data; | |
33 | ||
34 | /* | |
35 | * zone->lock and zone->lru_lock are two of the hottest locks in the kernel. | |
36 | * So add a wild amount of padding here to ensure that they fall into separate | |
37 | * cachelines. There are very few zone structures in the machine, so space | |
38 | * consumption is not a concern here. | |
39 | */ | |
40 | #if defined(CONFIG_SMP) | |
41 | struct zone_padding { | |
42 | char x[0]; | |
22fc6ecc | 43 | } ____cacheline_internodealigned_in_smp; |
1da177e4 LT |
44 | #define ZONE_PADDING(name) struct zone_padding name; |
45 | #else | |
46 | #define ZONE_PADDING(name) | |
47 | #endif | |
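ZONE_PADDING works by inserting a zero-sized but cacheline-aligned member, which forces whatever follows it onto a new cacheline without consuming space itself. A minimal userspace illustration of the same trick; the 64-byte line size and the demo field names are assumptions (the kernel uses per-arch internode alignment and real spinlocks):

```c
#include <stdio.h>
#include <stddef.h>

#define CACHELINE 64   /* assumption: a typical L1 line size */

/* Zero-sized, aligned member: costs nothing except the alignment of what follows. */
struct pad { char x[0]; } __attribute__((aligned(CACHELINE)));
#define PADDING(name) struct pad name;

struct demo_zone {
    unsigned long free_pages;   /* fields touched by the allocator */
    unsigned long lock;         /* stand-in for spinlock_t */
    PADDING(_pad1_)
    unsigned long lru_lock;     /* stand-in for the reclaim-side fields */
    unsigned long nr_active;
};

int main(void)
{
    /* lru_lock lands on its own cacheline, away from lock/free_pages. */
    printf("offsetof(lock)     = %zu\n", offsetof(struct demo_zone, lock));
    printf("offsetof(lru_lock) = %zu\n", offsetof(struct demo_zone, lru_lock));
    return 0;
}
```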
48 | ||
2244b95a | 49 | enum zone_stat_item { |
f3dbd344 CL |
50 | NR_ANON_PAGES, /* Mapped anonymous pages */ |
51 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. | |
65ba55f5 | 52 | only modified from process context */ |
347ce434 | 53 | NR_FILE_PAGES, |
9a865ffa | 54 | NR_SLAB, /* Pages used by slab allocator */ |
df849a15 | 55 | NR_PAGETABLE, /* used for pagetables */ |
b1e7a8fd | 56 | NR_FILE_DIRTY, |
ce866b34 | 57 | NR_WRITEBACK, |
fd39fc85 | 58 | NR_UNSTABLE_NFS, /* NFS unstable pages */ |
d2c5e30c | 59 | NR_BOUNCE, |
ca889e6c CL |
60 | #ifdef CONFIG_NUMA |
61 | NUMA_HIT, /* allocated in intended node */ | |
62 | NUMA_MISS, /* allocated in non intended node */ | |
63 | NUMA_FOREIGN, /* was intended here, hit elsewhere */ | |
64 | NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ | |
65 | NUMA_LOCAL, /* allocation from local node */ | |
66 | NUMA_OTHER, /* allocation from other node */ | |
67 | #endif | |
2244b95a CL |
68 | NR_VM_ZONE_STAT_ITEMS }; |
69 | ||
1da177e4 LT |
70 | struct per_cpu_pages { |
71 | int count; /* number of pages in the list */ | |
1da177e4 LT |
72 | int high; /* high watermark, emptying needed */ |
73 | int batch; /* chunk size for buddy add/remove */ | |
74 | struct list_head list; /* the list of pages */ | |
75 | }; | |
76 | ||
77 | struct per_cpu_pageset { | |
78 | struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ | |
2244b95a | 79 | #ifdef CONFIG_SMP |
df9ecaba | 80 | s8 stat_threshold; |
2244b95a CL |
81 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; |
82 | #endif | |
1da177e4 LT |
83 | } ____cacheline_aligned_in_smp; |
84 | ||
e7c8d5c9 CL |
85 | #ifdef CONFIG_NUMA |
86 | #define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)]) | |
87 | #else | |
88 | #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)]) | |
89 | #endif | |
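Each per_cpu_pageset gives every CPU two small page caches per zone: pcp[0] for cache-hot pages and pcp[1] for cold ones, each governed by a count, a high watermark and a batch size. The sketch below is only a rough userspace model of that bookkeeping; the list handling, locking and the actual hand-back to the buddy free lists are omitted, and the numeric thresholds are made up:

```c
#include <stdio.h>

struct per_cpu_pages {
    int count;   /* pages currently cached */
    int high;    /* when count reaches this, drain back to the zone */
    int batch;   /* how many pages to move per refill/drain */
};

struct per_cpu_pageset {
    struct per_cpu_pages pcp[2];   /* 0: hot, 1: cold */
};

/* Free one page into the per-cpu cache; drain a batch when it gets too full. */
static void pcp_free_page(struct per_cpu_pages *p)
{
    p->count++;
    if (p->count >= p->high) {
        printf("draining %d pages back to the zone free lists\n", p->batch);
        p->count -= p->batch;
    }
}

int main(void)
{
    struct per_cpu_pageset set = { .pcp = { { 0, 6, 2 }, { 0, 2, 2 } } };
    int cold = 0;                       /* 0: hot list, 1: cold list */

    for (int i = 0; i < 8; i++)
        pcp_free_page(&set.pcp[cold]);
    printf("hot list now holds %d pages\n", set.pcp[cold].count);
    return 0;
}
```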
90 | ||
2f1b6248 CL |
91 | enum zone_type { |
92 | /* | |
93 | * ZONE_DMA is used when there are devices that are not able | |
94 | * to do DMA to all of addressable memory (ZONE_NORMAL). Then we | |
95 | * carve out the portion of memory that is needed for these devices. | |
96 | * The range is arch specific. | |
97 | * | |
98 | * Some examples | |
99 | * | |
100 | * Architecture Limit | |
101 | * --------------------------- | |
102 | * parisc, ia64, sparc <4G | |
103 | * s390 <2G | |
104 | * arm26 <48M | |
105 | * arm Various | |
106 | * alpha Unlimited or 0-16MB. | |
107 | * | |
108 | * i386, x86_64 and multiple other arches | |
109 | * <16M. | |
110 | */ | |
111 | ZONE_DMA, | |
fb0e7942 | 112 | #ifdef CONFIG_ZONE_DMA32 |
2f1b6248 CL |
113 | /* |
114 | * x86_64 needs two ZONE_DMAs because it supports devices that are | |
115 | * only able to do DMA to the lower 16M but also 32 bit devices that | |
116 | * can only do DMA areas below 4G. | |
117 | */ | |
118 | ZONE_DMA32, | |
fb0e7942 | 119 | #endif |
2f1b6248 CL |
120 | /* |
121 | * Normal addressable memory is in ZONE_NORMAL. DMA operations can be | |
122 | * performed on pages in ZONE_NORMAL if the DMA devices support | |
123 | * transfers to all addressable memory. | |
124 | */ | |
125 | ZONE_NORMAL, | |
126 | /* | |
127 | * A memory area that is only addressable by the kernel through | |
128 | * mapping portions into its own address space. This is for example | |
129 | * used by i386 to allow the kernel to address the memory beyond | |
130 | * 900MB. The kernel will set up special mappings (page | |
131 | * table entries on i386) for each page that the kernel needs to | |
132 | * access. | |
133 | */ | |
134 | ZONE_HIGHMEM, | |
1da177e4 | 135 | |
2f1b6248 CL |
136 | MAX_NR_ZONES |
137 | }; | |
1da177e4 | 138 | |
2f1b6248 | 139 | #define ZONES_SHIFT 2 /* ceil(log2(MAX_NR_ZONES)) */ |
1da177e4 LT |
140 | |
141 | /* | |
142 | * When a memory allocation must conform to specific limitations (such | |
143 | * as being suitable for DMA) the caller will pass in hints to the | |
144 | * allocator in the gfp_mask, in the zone modifier bits. These bits | |
145 | * are used to select a priority ordered list of memory zones which | |
146 | * match the requested limits. GFP_ZONEMASK defines which bits within | |
147 | * the gfp_mask should be considered as zone modifiers. Each valid | |
148 | * combination of the zone modifier bits has a corresponding list | |
149 | * of zones (in node_zonelists). Thus for two zone modifiers there | |
150 | * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will | |
151 | * be 8 (2 ** 3) zonelists. GFP_ZONETYPES defines the number of possible | |
152 | * combinations of zone modifiers in "zone modifier space". | |
ac3461ad | 153 | * |
79046ae0 AW |
154 | * As an optimisation any zone modifier bits which are only valid when |
155 | * no other zone modifier bits are set (loners) should be placed in | |
156 | * the highest order bits of this field. This allows us to reduce the | |
157 | * extent of the zonelists thus saving space. For example in the case | |
158 | * of three zone modifier bits, we could require up to eight zonelists. | |
159 | * If the left most zone modifier is a "loner" then the highest valid | |
160 | * zonelist would be four allowing us to allocate only five zonelists. | |
ce2ea89b AW |
161 | * Use the first form for GFP_ZONETYPES when the left most bit is not |
162 | * a "loner", otherwise use the second. | |
79046ae0 | 163 | * |
ac3461ad | 164 | * NOTE! Make sure this matches the zones in <linux/gfp.h> |
1da177e4 | 165 | */ |
fb0e7942 CL |
166 | #define GFP_ZONETYPES ((GFP_ZONEMASK + 1) / 2 + 1) /* Loner */ |
167 | ||
168 | #ifdef CONFIG_ZONE_DMA32 | |
169 | #define GFP_ZONEMASK 0x07 | |
170 | #else | |
171 | #define GFP_ZONEMASK 0x03 | |
172 | #endif | |
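Worked example of the arithmetic above: with ZONE_DMA32 there are three zone modifier bits (GFP_ZONEMASK == 0x07), which would naively need 2^3 = 8 zonelists; treating the topmost modifier as a "loner" reduces that to (0x07 + 1) / 2 + 1 = 5. Without ZONE_DMA32 the mask is 0x03 and the same formula yields 3. A trivial standalone check of that arithmetic:

```c
#include <stdio.h>

int main(void)
{
    unsigned int masks[] = { 0x07, 0x03 };   /* with and without ZONE_DMA32 */

    for (int i = 0; i < 2; i++) {
        unsigned int mask  = masks[i];
        unsigned int naive = mask + 1;             /* one zonelist per combination */
        unsigned int loner = (mask + 1) / 2 + 1;   /* GFP_ZONETYPES, "loner" form */
        printf("GFP_ZONEMASK=0x%02x: naive=%u zonelists, loner=%u\n",
               mask, naive, loner);
    }
    return 0;
}
```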
1da177e4 | 173 | |
1da177e4 LT |
174 | struct zone { |
175 | /* Fields commonly accessed by the page allocator */ | |
176 | unsigned long free_pages; | |
177 | unsigned long pages_min, pages_low, pages_high; | |
178 | /* | |
179 | * We don't know whether the memory that we're about to allocate will be |
180 | * freeable or eventually released, so to avoid wasting several GB of ram we |
181 | * must reserve some of the lower zone memory (otherwise we risk running OOM |
182 | * on the lower zones even though there is plenty of freeable ram on the |
183 | * higher zones). This array is recalculated at runtime if the |
184 | * sysctl_lowmem_reserve_ratio sysctl changes. |
185 | */ | |
186 | unsigned long lowmem_reserve[MAX_NR_ZONES]; | |
187 | ||
e7c8d5c9 | 188 | #ifdef CONFIG_NUMA |
9614634f CL |
189 | /* |
190 | * zone reclaim becomes active if more unmapped pages exist. | |
191 | */ | |
192 | unsigned long min_unmapped_ratio; | |
e7c8d5c9 CL |
193 | struct per_cpu_pageset *pageset[NR_CPUS]; |
194 | #else | |
1da177e4 | 195 | struct per_cpu_pageset pageset[NR_CPUS]; |
e7c8d5c9 | 196 | #endif |
1da177e4 LT |
197 | /* |
198 | * free areas of different sizes | |
199 | */ | |
200 | spinlock_t lock; | |
bdc8cb98 DH |
201 | #ifdef CONFIG_MEMORY_HOTPLUG |
202 | /* see spanned/present_pages for more description */ | |
203 | seqlock_t span_seqlock; | |
204 | #endif | |
1da177e4 LT |
205 | struct free_area free_area[MAX_ORDER]; |
206 | ||
207 | ||
208 | ZONE_PADDING(_pad1_) | |
209 | ||
210 | /* Fields commonly accessed by the page reclaim scanner */ | |
211 | spinlock_t lru_lock; | |
212 | struct list_head active_list; | |
213 | struct list_head inactive_list; | |
214 | unsigned long nr_scan_active; | |
215 | unsigned long nr_scan_inactive; | |
216 | unsigned long nr_active; | |
217 | unsigned long nr_inactive; | |
218 | unsigned long pages_scanned; /* since last reclaim */ | |
219 | int all_unreclaimable; /* All pages pinned */ | |
220 | ||
1e7e5a90 MH |
221 | /* A count of how many reclaimers are scanning this zone */ |
222 | atomic_t reclaim_in_progress; | |
753ee728 | 223 | |
2244b95a CL |
224 | /* Zone statistics */ |
225 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; | |
9eeff239 | 226 | |
1da177e4 LT |
227 | /* |
228 | * prev_priority holds the scanning priority for this zone. It is | |
229 | * defined as the scanning priority at which we achieved our reclaim | |
230 | * target at the previous try_to_free_pages() or balance_pgdat() | |
231 | * invocation. |
232 | * | |
233 | * We use prev_priority as a measure of how much stress page reclaim is | |
234 | * under - it drives the swappiness decision: whether to unmap mapped | |
235 | * pages. | |
236 | * | |
237 | * temp_priority is used to remember the scanning priority at which | |
238 | * this zone was successfully refilled to free_pages == pages_high. | |
239 | * | |
240 | * Access to both these fields is quite racy even on uniprocessor. But | |
241 | * it is expected to average out OK. | |
242 | */ | |
243 | int temp_priority; | |
244 | int prev_priority; | |
245 | ||
246 | ||
247 | ZONE_PADDING(_pad2_) | |
248 | /* Rarely used or read-mostly fields */ | |
249 | ||
250 | /* | |
251 | * wait_table -- the array holding the hash table | |
02b694de | 252 | * wait_table_hash_nr_entries -- the size of the hash table array |
1da177e4 LT |
253 | * wait_table_bits -- wait_table_hash_nr_entries == (1 << wait_table_bits) |
254 | * | |
255 | * The purpose of all these is to keep track of the people | |
256 | * waiting for a page to become available and make them | |
257 | * runnable again when possible. The trouble is that this | |
258 | * consumes a lot of space, especially when so few things | |
259 | * wait on pages at a given time. So instead of using | |
260 | * per-page waitqueues, we use a waitqueue hash table. | |
261 | * | |
262 | * The bucket discipline is to sleep on the same queue when | |
263 | * colliding and wake all in that wait queue when removing. | |
264 | * When something wakes, it must check to be sure its page is | |
265 | * truly available, a la thundering herd. The cost of a | |
266 | * collision is great, but given the expected load of the | |
267 | * table, they should be so rare as to be outweighed by the | |
268 | * benefits from the saved space. | |
269 | * | |
270 | * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the |
271 | * primary users of these fields, and in mm/page_alloc.c | |
272 | * free_area_init_core() performs the initialization of them. | |
273 | */ | |
274 | wait_queue_head_t * wait_table; | |
02b694de | 275 | unsigned long wait_table_hash_nr_entries; |
1da177e4 LT |
276 | unsigned long wait_table_bits; |
277 | ||
278 | /* | |
279 | * Discontig memory support fields. | |
280 | */ | |
281 | struct pglist_data *zone_pgdat; | |
1da177e4 LT |
282 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
283 | unsigned long zone_start_pfn; | |
284 | ||
bdc8cb98 DH |
285 | /* |
286 | * zone_start_pfn, spanned_pages and present_pages are all | |
287 | * protected by span_seqlock. It is a seqlock because it has | |
288 | * to be read outside of zone->lock, and it is done in the main | |
289 | * allocator path. But, it is written quite infrequently. | |
290 | * | |
291 | * The lock is declared along with zone->lock because it is | |
292 | * frequently read in proximity to zone->lock. It's good to | |
293 | * give them a chance of being in the same cacheline. | |
294 | */ | |
1da177e4 LT |
295 | unsigned long spanned_pages; /* total size, including holes */ |
296 | unsigned long present_pages; /* amount of memory (excluding holes) */ | |
297 | ||
298 | /* | |
299 | * rarely used fields: | |
300 | */ | |
301 | char *name; | |
22fc6ecc | 302 | } ____cacheline_internodealigned_in_smp; |
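A simplified userspace sketch of how pages_min/pages_low/pages_high and lowmem_reserve[] combine during an allocation, loosely following the shape of zone_watermark_ok() in mm/page_alloc.c; the per-order free-list checks are left out and all of the numbers are invented for illustration:

```c
#include <stdio.h>
#include <stdbool.h>

#define MAX_NR_ZONES 4

struct demo_zone {
    unsigned long free_pages;
    unsigned long pages_min, pages_low, pages_high;
    unsigned long lowmem_reserve[MAX_NR_ZONES];
};

/*
 * Simplified watermark check: an allocation whose class zone is
 * 'classzone_idx' may not push this zone below mark plus the reserve
 * this zone holds against that class.  (The real check also walks the
 * per-order free lists.)
 */
static bool watermark_ok(const struct demo_zone *z, unsigned int order,
                         unsigned long mark, int classzone_idx)
{
    unsigned long min = mark + z->lowmem_reserve[classzone_idx];
    return z->free_pages - (1UL << order) >= min;
}

int main(void)
{
    /* A pretend ZONE_DMA protecting itself from ZONE_NORMAL allocations. */
    struct demo_zone dma = {
        .free_pages = 500,
        .pages_min = 32, .pages_low = 64, .pages_high = 96,
        .lowmem_reserve = { 0, 0, 768, 768 },   /* index 2 ~ ZONE_NORMAL */
    };

    printf("order-0 GFP_DMA alloc ok?    %d\n", watermark_ok(&dma, 0, dma.pages_low, 0));
    printf("order-0 GFP_KERNEL alloc ok? %d\n", watermark_ok(&dma, 0, dma.pages_low, 2));
    return 0;
}
```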
1da177e4 | 303 | |
1da177e4 LT |
304 | /* |
305 | * The "priority" of VM scanning is how much of the queues we will scan in one | |
306 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | |
307 | * queues ("queue_length >> 12") during an aging round. | |
308 | */ | |
309 | #define DEF_PRIORITY 12 | |
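Illustration of that scan fraction: at DEF_PRIORITY the scanner looks at queue_length >> 12 pages, and each unsuccessful pass lowers the priority so the scanned fraction doubles until, at priority 0, the whole list is scanned. The list length below is an arbitrary assumption:

```c
#include <stdio.h>

#define DEF_PRIORITY 12

int main(void)
{
    unsigned long queue_length = 1 << 20;   /* assumption: ~1M pages on the LRU */

    for (int priority = DEF_PRIORITY; priority >= 0; priority--)
        printf("priority %2d: scan %lu of %lu pages\n",
               priority, queue_length >> priority, queue_length);
    return 0;
}
```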
310 | ||
311 | /* | |
312 | * One allocation request operates on a zonelist. A zonelist | |
313 | * is a list of zones, the first one is the 'goal' of the | |
314 | * allocation, the other zones are fallback zones, in decreasing | |
315 | * priority. | |
316 | * | |
317 | * Right now a zonelist takes up less than a cacheline. We never | |
318 | * modify it apart from boot-up, and only a few indices are used, | |
319 | * so despite the zonelist table being relatively big, the cache | |
320 | * footprint of this construct is very small. | |
321 | */ | |
322 | struct zonelist { | |
323 | struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited | |
324 | }; | |
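Because zones[] is NULL-delimited, a fallback walk is simply a loop until the first NULL entry; the real allocator does this in mm/page_alloc.c with watermark checks at every step. A self-contained sketch with stand-in zones (the names and the list order are made up):

```c
#include <stdio.h>
#include <stddef.h>

struct demo_zone { const char *name; };

#define MAX_ZONES_PER_LIST 8

struct demo_zonelist {
    struct demo_zone *zones[MAX_ZONES_PER_LIST + 1];   /* NULL delimited */
};

int main(void)
{
    struct demo_zone highmem = { "HighMem" }, normal = { "Normal" }, dma = { "DMA" };

    /* Preferred zone first, then fallbacks in decreasing priority. */
    struct demo_zonelist zl = { .zones = { &highmem, &normal, &dma, NULL } };

    for (struct demo_zone **z = zl.zones; *z != NULL; z++)
        printf("try zone %s\n", (*z)->name);
    return 0;
}
```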
325 | ||
326 | ||
327 | /* | |
328 | * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM | |
329 | * (mostly NUMA machines?) to describe a higher-level grouping of memory |
330 | * than a single zone. |
331 | * | |
332 | * On NUMA machines, each NUMA node would have a pg_data_t to describe | |
333 | * its memory layout. |
334 | * | |
335 | * Memory statistics and page replacement data structures are maintained on a | |
336 | * per-zone basis. | |
337 | */ | |
338 | struct bootmem_data; | |
339 | typedef struct pglist_data { | |
340 | struct zone node_zones[MAX_NR_ZONES]; | |
341 | struct zonelist node_zonelists[GFP_ZONETYPES]; | |
342 | int nr_zones; | |
d41dee36 | 343 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
1da177e4 | 344 | struct page *node_mem_map; |
d41dee36 | 345 | #endif |
1da177e4 | 346 | struct bootmem_data *bdata; |
208d54e5 DH |
347 | #ifdef CONFIG_MEMORY_HOTPLUG |
348 | /* | |
349 | * Must be held any time you expect node_start_pfn, node_present_pages | |
350 | * or node_spanned_pages stay constant. Holding this will also | |
351 | * guarantee that any pfn_valid() stays that way. | |
352 | * | |
353 | * Nests above zone->lock and zone->span_seqlock. |
354 | */ | |
355 | spinlock_t node_size_lock; | |
356 | #endif | |
1da177e4 LT |
357 | unsigned long node_start_pfn; |
358 | unsigned long node_present_pages; /* total number of physical pages */ | |
359 | unsigned long node_spanned_pages; /* total size of physical page | |
360 | range, including holes */ | |
361 | int node_id; | |
1da177e4 LT |
362 | wait_queue_head_t kswapd_wait; |
363 | struct task_struct *kswapd; | |
364 | int kswapd_max_order; | |
365 | } pg_data_t; | |
366 | ||
367 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) | |
368 | #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) | |
d41dee36 | 369 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
408fde81 | 370 | #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) |
d41dee36 AW |
371 | #else |
372 | #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) | |
373 | #endif | |
408fde81 | 374 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) |
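Both forms of pgdat_page_nr() return the struct page describing the pagenr-th page of a node: with a flat per-node mem_map it is plain pointer arithmetic off node_mem_map, otherwise it goes through pfn_to_page() on node_start_pfn + pagenr. A toy model of the flat case, using a stand-in struct page and pgdat (the pfn values are arbitrary):

```c
#include <stdio.h>

struct page { unsigned long flags; };

struct demo_pgdat {
    struct page *node_mem_map;     /* flat map of this node's pages */
    unsigned long node_start_pfn;  /* first pfn owned by the node */
};

#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))

int main(void)
{
    static struct page pages[128];                 /* pretend node with 128 pages */
    struct demo_pgdat pgdat = { pages, 0x1000 };   /* node starts at pfn 0x1000 */

    struct page *p = pgdat_page_nr(&pgdat, 5);
    printf("page 5 of the node sits at index %td, pfn 0x%lx\n",
           p - pgdat.node_mem_map, pgdat.node_start_pfn + 5);
    return 0;
}
```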
1da177e4 | 375 | |
208d54e5 DH |
376 | #include <linux/memory_hotplug.h> |
377 | ||
1da177e4 LT |
378 | void __get_zone_counts(unsigned long *active, unsigned long *inactive, |
379 | unsigned long *free, struct pglist_data *pgdat); | |
380 | void get_zone_counts(unsigned long *active, unsigned long *inactive, | |
381 | unsigned long *free); | |
382 | void build_all_zonelists(void); | |
383 | void wakeup_kswapd(struct zone *zone, int order); | |
384 | int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |
7fb1d9fc | 385 | int classzone_idx, int alloc_flags); |
1da177e4 | 386 | |
718127cc YG |
387 | extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, |
388 | unsigned long size); | |
389 | ||
1da177e4 LT |
390 | #ifdef CONFIG_HAVE_MEMORY_PRESENT |
391 | void memory_present(int nid, unsigned long start, unsigned long end); | |
392 | #else | |
393 | static inline void memory_present(int nid, unsigned long start, unsigned long end) {} | |
394 | #endif | |
395 | ||
396 | #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE | |
397 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | |
398 | #endif | |
399 | ||
400 | /* | |
401 | * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. | |
402 | */ | |
403 | #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) | |
404 | ||
f3fe6512 CK |
405 | static inline int populated_zone(struct zone *zone) |
406 | { | |
407 | return (!!zone->present_pages); | |
408 | } | |
409 | ||
2f1b6248 | 410 | static inline int is_highmem_idx(enum zone_type idx) |
1da177e4 LT |
411 | { |
412 | return (idx == ZONE_HIGHMEM); | |
413 | } | |
414 | ||
2f1b6248 | 415 | static inline int is_normal_idx(enum zone_type idx) |
1da177e4 LT |
416 | { |
417 | return (idx == ZONE_NORMAL); | |
418 | } | |
9328b8fa | 419 | |
1da177e4 LT |
420 | /** |
421 | * is_highmem - helper function to quickly check if a struct zone is a | |
422 | * highmem zone or not. This is an attempt to keep references | |
423 | * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. | |
424 | * @zone - pointer to struct zone variable | |
425 | */ | |
426 | static inline int is_highmem(struct zone *zone) | |
427 | { | |
428 | return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM; | |
429 | } | |
430 | ||
431 | static inline int is_normal(struct zone *zone) | |
432 | { | |
433 | return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; | |
434 | } | |
435 | ||
9328b8fa NP |
436 | static inline int is_dma32(struct zone *zone) |
437 | { | |
fb0e7942 | 438 | #ifdef CONFIG_ZONE_DMA32 |
9328b8fa | 439 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; |
fb0e7942 CL |
440 | #else |
441 | return 0; | |
442 | #endif | |
9328b8fa NP |
443 | } |
444 | ||
445 | static inline int is_dma(struct zone *zone) | |
446 | { | |
447 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA; | |
448 | } | |
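is_dma(), is_normal() and is_highmem() all rely on the same trick as zone_idx(): a zone's type is recovered by pointer arithmetic against its node's node_zones[] array. A small userspace mock of that comparison, assuming the three-zone (no DMA32) layout:

```c
#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

struct demo_pgdat;
struct demo_zone  { struct demo_pgdat *zone_pgdat; };
struct demo_pgdat { struct demo_zone node_zones[MAX_NR_ZONES]; };

#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)

static int is_highmem(struct demo_zone *zone)
{
    return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
}

int main(void)
{
    static struct demo_pgdat node;

    for (int i = 0; i < MAX_NR_ZONES; i++)
        node.node_zones[i].zone_pgdat = &node;

    struct demo_zone *z = &node.node_zones[ZONE_HIGHMEM];
    printf("zone_idx = %td, is_highmem = %d\n", zone_idx(z), is_highmem(z));
    return 0;
}
```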
449 | ||
1da177e4 LT |
450 | /* These two functions are used to setup the per zone pages min values */ |
451 | struct ctl_table; | |
452 | struct file; | |
453 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *, | |
454 | void __user *, size_t *, loff_t *); | |
455 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; | |
456 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, | |
457 | void __user *, size_t *, loff_t *); | |
8ad4b1fb RS |
458 | int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, |
459 | void __user *, size_t *, loff_t *); | |
9614634f CL |
460 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, |
461 | struct file *, void __user *, size_t *, loff_t *); | |
1da177e4 LT |
462 | |
463 | #include <linux/topology.h> | |
464 | /* Returns the number of the current Node. */ | |
69d81fcd | 465 | #ifndef numa_node_id |
39c715b7 | 466 | #define numa_node_id() (cpu_to_node(raw_smp_processor_id())) |
69d81fcd | 467 | #endif |
1da177e4 | 468 | |
93b7504e | 469 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
1da177e4 LT |
470 | |
471 | extern struct pglist_data contig_page_data; | |
472 | #define NODE_DATA(nid) (&contig_page_data) | |
473 | #define NODE_MEM_MAP(nid) mem_map | |
474 | #define MAX_NODES_SHIFT 1 | |
1da177e4 | 475 | |
93b7504e | 476 | #else /* CONFIG_NEED_MULTIPLE_NODES */ |
1da177e4 LT |
477 | |
478 | #include <asm/mmzone.h> | |
479 | ||
93b7504e | 480 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
348f8b6c | 481 | |
95144c78 KH |
482 | extern struct pglist_data *first_online_pgdat(void); |
483 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | |
484 | extern struct zone *next_zone(struct zone *zone); | |
8357f869 KH |
485 | |
486 | /** | |
487 | * for_each_pgdat - helper macro to iterate over all nodes | |
488 | * @pgdat - pointer to a pg_data_t variable | |
489 | */ | |
490 | #define for_each_online_pgdat(pgdat) \ | |
491 | for (pgdat = first_online_pgdat(); \ | |
492 | pgdat; \ | |
493 | pgdat = next_online_pgdat(pgdat)) | |
8357f869 KH |
494 | /** |
495 | * for_each_zone - helper macro to iterate over all memory zones | |
496 | * @zone - pointer to struct zone variable | |
497 | * | |
498 | * The user only needs to declare the zone variable, for_each_zone | |
499 | * fills it in. | |
500 | */ | |
501 | #define for_each_zone(zone) \ | |
502 | for (zone = (first_online_pgdat())->node_zones; \ | |
503 | zone; \ | |
504 | zone = next_zone(zone)) | |
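A userspace mock of the iteration that for_each_online_pgdat()/for_each_zone() perform: walk the zones of a node, then hop to the first zone of the next online node until none are left. Only the macro shape follows the header; the toy topology and the helper bodies are assumptions standing in for the kernel's real implementations:

```c
#include <stdio.h>

#define MAX_NR_ZONES 3
#define NR_NODES     2

struct demo_pgdat;
struct demo_zone  { const char *name; struct demo_pgdat *zone_pgdat; };
struct demo_pgdat { struct demo_zone node_zones[MAX_NR_ZONES]; };

static struct demo_pgdat nodes[NR_NODES];

static struct demo_pgdat *first_online_pgdat(void) { return &nodes[0]; }

static struct demo_pgdat *next_online_pgdat(struct demo_pgdat *pgdat)
{
    return (pgdat + 1 < nodes + NR_NODES) ? pgdat + 1 : NULL;
}

/* Advance within a node, then hop to the first zone of the next node. */
static struct demo_zone *next_zone(struct demo_zone *zone)
{
    struct demo_pgdat *pgdat = zone->zone_pgdat;

    if (zone + 1 < pgdat->node_zones + MAX_NR_ZONES)
        return zone + 1;
    pgdat = next_online_pgdat(pgdat);
    return pgdat ? pgdat->node_zones : NULL;
}

#define for_each_zone(zone) \
    for (zone = first_online_pgdat()->node_zones; zone; zone = next_zone(zone))

int main(void)
{
    static const char *names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
    struct demo_zone *zone;

    for (int n = 0; n < NR_NODES; n++)
        for (int i = 0; i < MAX_NR_ZONES; i++)
            nodes[n].node_zones[i] = (struct demo_zone){ names[i], &nodes[n] };

    for_each_zone(zone)
        printf("node %td zone %s\n", zone->zone_pgdat - nodes, zone->name);
    return 0;
}
```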
505 | ||
d41dee36 AW |
506 | #ifdef CONFIG_SPARSEMEM |
507 | #include <asm/sparsemem.h> | |
508 | #endif | |
509 | ||
07808b74 | 510 | #if BITS_PER_LONG == 32 |
1da177e4 | 511 | /* |
a2f1b424 AK |
512 | * with 32 bit page->flags field, we reserve 9 bits for node/zone info. |
513 | * there are up to 4 zones (2 bits, see ZONES_SHIFT) and this leaves 9-2=7 bits for nodes. |
1da177e4 | 514 | */ |
a2f1b424 | 515 | #define FLAGS_RESERVED 9 |
348f8b6c | 516 | |
1da177e4 LT |
517 | #elif BITS_PER_LONG == 64 |
518 | /* | |
519 | * with 64 bit flags field, there's plenty of room. | |
520 | */ | |
348f8b6c | 521 | #define FLAGS_RESERVED 32 |
1da177e4 | 522 | |
348f8b6c | 523 | #else |
1da177e4 | 524 | |
348f8b6c | 525 | #error BITS_PER_LONG not defined |
1da177e4 | 526 | |
1da177e4 LT |
527 | #endif |
528 | ||
b159d43f AW |
529 | #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID |
530 | #define early_pfn_to_nid(pfn) (0UL) |
531 | #endif | |
532 | ||
2bdaf115 AW |
533 | #ifdef CONFIG_FLATMEM |
534 | #define pfn_to_nid(pfn) (0) | |
535 | #endif | |
536 | ||
d41dee36 AW |
537 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) |
538 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | |
539 | ||
540 | #ifdef CONFIG_SPARSEMEM | |
541 | ||
542 | /* | |
543 | * SECTION_SHIFT #bits space required to store a section # | |
544 | * | |
545 | * PA_SECTION_SHIFT physical address to/from section number | |
546 | * PFN_SECTION_SHIFT pfn to/from section number | |
547 | */ | |
548 | #define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) | |
549 | ||
550 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) | |
551 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) | |
552 | ||
553 | #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) | |
554 | ||
555 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) | |
556 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) | |
557 | ||
558 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS | |
559 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | |
560 | #endif | |
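Worked example of the section arithmetic: with illustrative i386-like values (SECTION_SIZE_BITS = 26, MAX_PHYSMEM_BITS = 32, PAGE_SHIFT = 12; the real values come from <asm/sparsemem.h> and <asm/page.h>), each section covers 2^26 bytes, i.e. 16384 pages, and a pfn maps to its section number by shifting away PFN_SECTION_SHIFT bits:

```c
#include <stdio.h>

/* Illustrative per-arch values; the real ones come from <asm/sparsemem.h>
 * and <asm/page.h>. */
#define SECTION_SIZE_BITS 26
#define MAX_PHYSMEM_BITS  32
#define PAGE_SHIFT        12

#define SECTIONS_SHIFT     (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT  (SECTION_SIZE_BITS - PAGE_SHIFT)
#define NR_MEM_SECTIONS    (1UL << SECTIONS_SHIFT)
#define PAGES_PER_SECTION  (1UL << PFN_SECTION_SHIFT)

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

int main(void)
{
    unsigned long pfn = 0x12345;

    printf("%lu sections of %lu pages (%lu MiB) each\n",
           NR_MEM_SECTIONS, PAGES_PER_SECTION,
           (PAGES_PER_SECTION << PAGE_SHIFT) >> 20);
    printf("pfn 0x%lx lives in section %lu, which starts at pfn 0x%lx\n",
           pfn, pfn_to_section_nr(pfn),
           section_nr_to_pfn(pfn_to_section_nr(pfn)));
    return 0;
}
```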
561 | ||
562 | struct page; | |
563 | struct mem_section { | |
29751f69 AW |
564 | /* |
565 | * This is, logically, a pointer to an array of struct | |
566 | * pages. However, it is stored with some other magic. | |
567 | * (see sparse.c::sparse_init_one_section()) | |
568 | * | |
30c253e6 AW |
569 | * Additionally during early boot we encode node id of |
570 | * the location of the section here to guide allocation. | |
571 | * (see sparse.c::memory_present()) | |
572 | * | |
29751f69 AW |
573 | * Making it a UL at least makes someone do a cast |
574 | * before using it wrong. | |
575 | */ | |
576 | unsigned long section_mem_map; | |
d41dee36 AW |
577 | }; |
578 | ||
3e347261 BP |
579 | #ifdef CONFIG_SPARSEMEM_EXTREME |
580 | #define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) | |
581 | #else | |
582 | #define SECTIONS_PER_ROOT 1 | |
583 | #endif | |
802f192e | 584 | |
3e347261 BP |
585 | #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) |
586 | #define NR_SECTION_ROOTS (NR_MEM_SECTIONS / SECTIONS_PER_ROOT) | |
587 | #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) | |
802f192e | 588 | |
3e347261 BP |
589 | #ifdef CONFIG_SPARSEMEM_EXTREME |
590 | extern struct mem_section *mem_section[NR_SECTION_ROOTS]; | |
802f192e | 591 | #else |
3e347261 BP |
592 | extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; |
593 | #endif | |
d41dee36 | 594 | |
29751f69 AW |
595 | static inline struct mem_section *__nr_to_section(unsigned long nr) |
596 | { | |
3e347261 BP |
597 | if (!mem_section[SECTION_NR_TO_ROOT(nr)]) |
598 | return NULL; | |
599 | return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; | |
29751f69 | 600 | } |
4ca644d9 | 601 | extern int __section_nr(struct mem_section* ms); |
29751f69 AW |
602 | |
603 | /* | |
604 | * We use the lower bits of the mem_map pointer to store | |
605 | * a little bit of information. There should be at least | |
606 | * 3 bits here due to 32-bit alignment. | |
607 | */ | |
608 | #define SECTION_MARKED_PRESENT (1UL<<0) | |
609 | #define SECTION_HAS_MEM_MAP (1UL<<1) | |
610 | #define SECTION_MAP_LAST_BIT (1UL<<2) | |
611 | #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) | |
30c253e6 | 612 | #define SECTION_NID_SHIFT 2 |
29751f69 AW |
613 | |
614 | static inline struct page *__section_mem_map_addr(struct mem_section *section) | |
615 | { | |
616 | unsigned long map = section->section_mem_map; | |
617 | map &= SECTION_MAP_MASK; | |
618 | return (struct page *)map; | |
619 | } | |
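Sketch of the bit-packing described above: the low bits of section_mem_map carry the PRESENT/HAS_MEM_MAP flags, during early boot the node id is stashed in the remaining bits (shifted by SECTION_NID_SHIFT), and __section_mem_map_addr() simply masks the flag bits off. Note that the real kernel stores an encoded form of the mem_map pointer (see sparse.c); the direct pointer used here is purely illustrative:

```c
#include <stdio.h>

struct page { unsigned long flags; };

#define SECTION_MARKED_PRESENT  (1UL << 0)
#define SECTION_HAS_MEM_MAP     (1UL << 1)
#define SECTION_MAP_LAST_BIT    (1UL << 2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT - 1))
#define SECTION_NID_SHIFT       2

struct mem_section { unsigned long section_mem_map; };

/* Decode: mask off the low flag bits to recover the stored pointer. */
static struct page *section_mem_map_addr(struct mem_section *s)
{
    return (struct page *)(s->section_mem_map & SECTION_MAP_MASK);
}

int main(void)
{
    static struct page fake_map[4];          /* pretend per-section mem_map */
    struct mem_section sec;

    /* Early boot: only the node id (say node 3) is stashed in the field. */
    sec.section_mem_map = (3UL << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT;
    printf("early nid = %lu\n", sec.section_mem_map >> SECTION_NID_SHIFT);

    /* Later: the field holds a (suitably aligned) pointer plus flag bits. */
    sec.section_mem_map = (unsigned long)fake_map
                          | SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;
    printf("mem_map recovered ok: %d\n", section_mem_map_addr(&sec) == fake_map);
    return 0;
}
```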
620 | ||
621 | static inline int valid_section(struct mem_section *section) | |
622 | { | |
802f192e | 623 | return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); |
29751f69 AW |
624 | } |
625 | ||
626 | static inline int section_has_mem_map(struct mem_section *section) | |
627 | { | |
802f192e | 628 | return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); |
29751f69 AW |
629 | } |
630 | ||
631 | static inline int valid_section_nr(unsigned long nr) | |
632 | { | |
633 | return valid_section(__nr_to_section(nr)); | |
634 | } | |
635 | ||
d41dee36 AW |
636 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) |
637 | { | |
29751f69 | 638 | return __nr_to_section(pfn_to_section_nr(pfn)); |
d41dee36 AW |
639 | } |
640 | ||
d41dee36 AW |
641 | static inline int pfn_valid(unsigned long pfn) |
642 | { | |
643 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | |
644 | return 0; | |
29751f69 | 645 | return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); |
d41dee36 AW |
646 | } |
647 | ||
648 | /* | |
649 | * These are _only_ used during initialisation, therefore they | |
650 | * can use __initdata ... They could have names to indicate | |
651 | * this restriction. | |
652 | */ | |
653 | #ifdef CONFIG_NUMA | |
161599ff AW |
654 | #define pfn_to_nid(pfn) \ |
655 | ({ \ | |
656 | unsigned long __pfn_to_nid_pfn = (pfn); \ | |
657 | page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ | |
658 | }) | |
2bdaf115 AW |
659 | #else |
660 | #define pfn_to_nid(pfn) (0) | |
d41dee36 AW |
661 | #endif |
662 | ||
d41dee36 AW |
663 | #define early_pfn_valid(pfn) pfn_valid(pfn) |
664 | void sparse_init(void); | |
665 | #else | |
666 | #define sparse_init() do {} while (0) | |
28ae55c9 | 667 | #define sparse_index_init(_sec, _nid) do {} while (0) |
d41dee36 AW |
668 | #endif /* CONFIG_SPARSEMEM */ |
669 | ||
670 | #ifndef early_pfn_valid | |
671 | #define early_pfn_valid(pfn) (1) | |
672 | #endif | |
673 | ||
674 | void memory_present(int nid, unsigned long start, unsigned long end); | |
675 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | |
676 | ||
1da177e4 LT |
677 | #endif /* !__ASSEMBLY__ */ |
678 | #endif /* __KERNEL__ */ | |
679 | #endif /* _LINUX_MMZONE_H */ |