#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per page.
 * This requires rebuilding the kernel, which is a really time-consuming
 * process. And sometimes a rebuild is impossible due to third party module
 * dependencies. Lastly, enlarging struct page could cause unwanted changes in
 * system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a certain place rather than
 * in struct page itself. This memory can be accessed by the accessor
 * functions provided by this code. During the boot process, it checks whether
 * allocation of a huge chunk of memory is needed or not. If not, it avoids
 * allocating memory at all. With this advantage, we can include this feature
 * in the kernel by default and avoid rebuilds and the related problems.
 *
 * To help these things work well, there are two callbacks for clients. One
 * is the need callback, which is mandatory if a user wants to avoid useless
 * memory allocation at boot-time. The other is the optional init callback,
 * which is used to do proper initialization after memory is allocated.
 *
 * The need callback is used to decide whether extended memory allocation is
 * needed or not. Sometimes users want to deactivate some features in a given
 * boot and the extra memory would be unnecessary. In this case, to avoid
 * allocating a huge chunk of memory, each client indicates its need for
 * extra memory through the need callback. If one of the need callbacks
 * returns true, it means that someone needs extra memory, so the
 * page extension core allocates memory for page extension. If
 * none of the need callbacks returns true, memory isn't needed at all in this
 * boot and the page extension core can skip the allocation. As a result,
 * no memory is wasted.
 *
 * When a need callback returns true, page_ext checks if there is a request for
 * extra memory through size in struct page_ext_operations. If it is non-zero,
 * extra space is allocated for each page_ext entry and the offset is returned
 * to the user through offset in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page extension
 * is completely initialized. In sparse memory systems, this extra memory is
 * allocated some time later than the memmap is allocated. In other words, the
 * lifetime of the memory for page extension isn't the same as that of the
 * memmap for struct page. Therefore, clients can't store extra data until
 * page extension is initialized, even if pages are allocated and used freely.
 * This could leave the extra data per page in an inadequate state, so, to
 * prevent that, clients can utilize this callback to initialize it correctly.
 */
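
/*
 * For illustration only (not part of the original file): a minimal sketch of
 * how a client could hook into page_ext. The "my_feature" names and the
 * my_feature_enabled flag are hypothetical; real users such as page_owner
 * follow the same pattern with their own page_ext_operations.
 */
#if 0	/* example sketch, not compiled */
struct my_feature_data {
	unsigned long state;		/* extra per-page data for the feature */
};

static bool need_my_feature(void)
{
	/* Request extra memory only when the feature is actually enabled. */
	return my_feature_enabled;
}

static void init_my_feature(void)
{
	/* Runs once page_ext is fully set up; per-page data is now usable. */
}

static struct page_ext_operations my_feature_ops = {
	.size = sizeof(struct my_feature_data),	/* extra bytes per entry */
	.need = need_my_feature,
	.init = init_my_feature,
};
#endif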

static struct page_ext_operations *page_ext_ops[] = {
	&debug_guardpage_ops,
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
static unsigned long extra_mem;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = sizeof(struct page_ext) +
						extra_mem;
			extra_mem += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

static unsigned long get_entry_size(void)
{
	return sizeof(struct page_ext) + extra_mem;
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}

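/*
 * For illustration only: each page_ext entry is laid out as one struct
 * page_ext followed by the extra bytes every client asked for via ->size,
 * at the ->offset recorded in invoke_need_callbacks(). A client would
 * typically reach its private data like the hypothetical helper below;
 * page_owner uses the same pattern.
 */
#if 0	/* example sketch, not compiled */
static struct my_feature_data *get_my_feature_data(struct page_ext *page_ext)
{
	return (void *)page_ext + my_feature_ops.offset;
}
#endif
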
#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES. When the page allocator's buddy algorithm
	 * checks a buddy's status, the range could fall outside the exact
	 * node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = get_entry_size() * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#if defined(CONFIG_DEBUG_VM)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_ext)
		return NULL;
#endif
	return get_entry(section->page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * which would cause kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
	total_usage += table_size;
	return 0;
}
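
/*
 * Illustration (not from the original file): because get_entry() computes
 * base + get_entry_size() * index, storing
 *
 *	section->page_ext = base - get_entry_size() * section_start_pfn
 *
 * lets lookup_page_ext() index with the raw pfn:
 *
 *	get_entry(section->page_ext, pfn)
 *		== base + get_entry_size() * (pfn - section_start_pfn)
 *
 * so the entry for the section's first page sits at base, the next one at
 * base + get_entry_size(), and so on.
 */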
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = get_entry_size() * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap. Some architectures can
			 * have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 *
			 * Take into account DEFERRED_STRUCT_PAGE_INIT.
			 */
			if (early_pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif