mm/sparse.c

/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

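/*
 * Editor's sketch (assuming the usual sparsemem helpers from
 * linux/mmzone.h): a section number decomposes into a root and an
 * offset, so __nr_to_section(nr) amounts to
 *
 *      mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]
 *
 * With SPARSEMEM_EXTREME only the root pointer array is static and
 * each root of SECTIONS_PER_ROOT entries is allocated on demand, so
 * a machine with large holes in its physical address space pays only
 * for the roots it actually populates.
 */
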
3e347261 28#ifdef CONFIG_SPARSEMEM_EXTREME
28ae55c9
DH
29static struct mem_section *sparse_index_alloc(int nid)
30{
31 struct mem_section *section = NULL;
32 unsigned long array_size = SECTIONS_PER_ROOT *
33 sizeof(struct mem_section);
34
39d24e64 35 if (slab_is_available())
46a66eec
MK
36 section = kmalloc_node(array_size, GFP_KERNEL, nid);
37 else
38 section = alloc_bootmem_node(NODE_DATA(nid), array_size);
28ae55c9
DH
39
40 if (section)
41 memset(section, 0, array_size);
42
43 return section;
3e347261 44}
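
/*
 * Editor's gloss on the branch above: before the slab allocator is
 * up, the root has to come from bootmem; once slab_is_available(),
 * kmalloc_node() places the root on the node whose sections it will
 * describe, keeping later mem_section lookups node-local.
 */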

static int sparse_index_init(unsigned long section_nr, int nid)
{
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        /*
         * This lock keeps two concurrent callers from allocating a
         * root for the same index; the losing allocation is simply
         * left unused.
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif
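
/*
 * Editor's note: -EEXIST here only means the root was already set
 * up, which is why memory_present() below can safely ignore the
 * return value.
 */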

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
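
/*
 * Worked example (editor's sketch, a SECTIONS_PER_ROOT of 256 is
 * assumed): a mem_section at offset 3 within root 2 yields
 * 2 * 256 + 3 == section number 515. The scan is linear in
 * NR_SECTION_ROOTS, which is acceptable for this infrequently used
 * helper.
 */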

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
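
/*
 * Round trip (editor's sketch): memory_present() stores
 *
 *      section_mem_map = sparse_encode_early_nid(nid) |
 *                        SECTION_MARKED_PRESENT;
 *
 * and sparse_early_nid() shifts the low flag bits back out,
 * recovering nid, on the assumption that SECTION_NID_SHIFT sits
 * above all the SECTION_* flag bits.
 */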

/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}
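
/*
 * Editor's note on usage: arch setup code is expected to call this
 * once per known memory range, e.g. memory_present(nid, start_pfn,
 * end_pfn), before sparse_init() runs, so that every section backed
 * by real memory is marked present and tagged with its node.
 */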

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                            unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_valid(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}
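
/*
 * Back-of-the-envelope example (editor's sketch, numbers assumed):
 * with 32768 pages per section and a 32-byte struct page, each valid
 * section a node owns contributes 32768 * 32 bytes == 1 MiB to the
 * size returned here.
 */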

/*
 * Subtle: we encode the real pfn into the mem_map in such a way that
 * the identity page - section_mem_map returns the page's actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps. While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute__((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
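
/*
 * Worked identity (editor's sketch): with pfn_base ==
 * section_nr_to_pfn(pnum), encode stores
 *
 *      coded = mem_map - pfn_base;
 *
 * and decode computes ((struct page *)coded) + pfn, so
 * pfn_to_page()/page_to_pfn() reduce to plain pointer arithmetic
 * against the stored section_mem_map.
 */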

static int sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map)
{
        if (!valid_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

        return 1;
}

static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_node(NODE_DATA(nid),
                        sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
        ms->section_mem_map = 0;
        return NULL;
}
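
/*
 * Editor's note on the fallback chain: alloc_remap() is an arch hook
 * (used by i386 NUMA) that carves the mem_map out of a remapped
 * per-node region; everyone else falls through to node-local
 * bootmem. On complete failure the section is un-marked
 * (section_mem_map = 0), so sparse_init() skips it.
 */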

static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * nr_pages;

        page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        memset(ret, 0, memmap_size);

        return ret;
}
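
/*
 * Editor's gloss: a whole section's memmap is a large physically
 * contiguous request (1 MiB under the example numbers above), so if
 * the high-order alloc_pages() call fails we settle for merely
 * virtually contiguous vmalloc() space; __kfree_section_memmap()
 * below undoes whichever path succeeded.
 */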

static int vaddr_in_vmalloc_area(void *addr)
{
        if (addr >= (void *)VMALLOC_START &&
            addr < (void *)VMALLOC_END)
                return 1;
        return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        if (vaddr_in_vmalloc_area(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
        unsigned long pnum;
        struct page *map;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!valid_section_nr(pnum))
                        continue;

                map = sparse_early_mem_map_alloc(pnum);
                if (!map)
                        continue;
                sparse_init_one_section(__nr_to_section(pnum), pnum, map);
        }
}
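
/*
 * Editor's note: this is expected to run once during boot, after the
 * architecture has finished its memory_present() calls, walking every
 * possible section number and wiring a mem_map into each section
 * that was marked present.
 */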

/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long flags;
        int ret;

        /*
         * No locking here: sparse_index_init() does its own locking,
         * and it may sleep (it can kmalloc), so it must run before we
         * take the pgdat resize spinlock.
         */
        sparse_index_init(section_nr, pgdat->node_id);
        memmap = __kmalloc_section_memmap(nr_pages);

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }
        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0)
                __kfree_section_memmap(memmap, nr_pages);
        return ret;
}
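
/*
 * Editor's note: the intended caller is the memory hotplug path
 * (__add_section() in mm/memory_hotplug.c in kernels of this
 * vintage). The ret <= 0 convention from sparse_init_one_section()
 * is what the cleanup above keys on: anything <= 0 means the freshly
 * allocated memmap was not consumed and is handed back to
 * __kfree_section_memmap().
 */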