/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
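
/*
 * For illustration, a sketch of the payoff (cf. the CONFIG_SPARSEMEM_VMEMMAP
 * case in include/asm-generic/memory_model.h): once the vmemmap is populated,
 * the pfn<->page conversions reduce to pointer arithmetic against the vmemmap
 * base, with no table lookups or memory accesses:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */
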
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid(size, align, goal,
					   BOOTMEM_ALLOC_ACCESSIBLE, node);
}
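
/*
 * Scratch buffer, set up in sparse_mem_maps_populate_node() below and
 * consumed by vmemmap_alloc_block_buf(), so that a node's section memmaps
 * can be carved out of one physically contiguous early allocation.
 */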
static void *vmemmap_buf;
static void *vmemmap_buf_end;

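/*
 * Note that the block size may exceed a page, so once the main allocator is
 * up the request below can be high-order; __GFP_REPEAT asks the page
 * allocator to try harder before failing such an allocation.
 */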
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(
				node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		else
			page = alloc_pages(
				GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* All early-stage allocations via the buffer must request the same size. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the allocation from the buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

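/*
 * The vmemmap_{pte,pmd,pud,pgd}_populate() helpers below each ensure that
 * one level of the kernel page table covers 'addr', allocating a backing
 * page when the entry is still empty, and return the entry so the caller
 * can descend to the next level.
 */
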
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
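
/*
 * Note: the leaf level above installs each new block with PAGE_KERNEL
 * protections, so the struct page array is mapped as ordinary cacheable
 * kernel memory.
 */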
114
29c71111 115pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
8f6aac41 116{
29c71111
AW
117 pmd_t *pmd = pmd_offset(pud, addr);
118 if (pmd_none(*pmd)) {
119 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
120 if (!p)
9dce07f1 121 return NULL;
29c71111 122 pmd_populate_kernel(&init_mm, pmd, p);
8f6aac41 123 }
29c71111 124 return pmd;
8f6aac41 125}
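
/*
 * With CONFIG_RKP the PUD-level table page is taken from rkp_ro_alloc()
 * rather than vmemmap_alloc_block(); presumably so that Samsung's RKP
 * (Real-time Kernel Protection) can keep page-table pages in a pool it
 * controls and write-protects from the kernel proper.
 */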
pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
#ifdef CONFIG_RKP
	void *p = NULL;
#endif
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
#ifdef CONFIG_RKP
		p = rkp_ro_alloc();
#else /* !CONFIG_RKP */
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
#endif
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
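
/*
 * A sketch (not part of this file) of how an architecture with no special
 * vmemmap requirements can provide the vmemmap_populate() hook used by
 * sparse_mem_map_populate() below, simply by forwarding to the basepages
 * helper:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */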

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}
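
/*
 * Rough sizing, under common assumptions (4K pages, 128MB sections, 64-byte
 * struct page): PAGES_PER_SECTION = 32768, so each section's memmap is
 * 32768 * 64 = 2MB, which is why the buffer below is sized and aligned in
 * PMD_SIZE units.
 */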
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, some memory will not be available.\n",
			__func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free the unused tail of the buffer */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}
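
/*
 * Both helpers above are consumed by the SPARSEMEM initialisation path in
 * mm/sparse.c (reached from sparse_init()), which allocates the memmap for
 * every present section at boot.
 */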