/*
 * Copyright IBM Corp. 2007,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
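
/*
 * A batch of page tables queued for delayed freeing after an RCU
 * grace period. pgt_index fills table[] with page table fragments
 * from the front, crst_index with crst tables from the back; the
 * batch is full when the two indices meet.
 */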
struct rcu_table_freelist {
	struct rcu_head rcu;
	struct mm_struct *mm;
	unsigned int pgt_index;
	unsigned int crst_index;
	unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
	  / sizeof(unsigned long))

static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);
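
/*
 * Return the cpu-local freelist batch, allocating a fresh page for
 * it with GFP_ATOMIC if none exists yet. May return NULL.
 */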
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (batch)
		return batch;
	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
	if (batch) {
		batch->mm = mm;
		batch->pgt_index = 0;
		batch->crst_index = RCU_FREELIST_SIZE;
		*batchp = batch;
	}
	return batch;
}
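
/*
 * RCU callback: free everything queued in the batch (page table
 * fragments from the front of table[], crst tables from the back),
 * then the batch page itself.
 */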
static void rcu_table_freelist_callback(struct rcu_head *head)
{
	struct rcu_table_freelist *batch =
		container_of(head, struct rcu_table_freelist, rcu);

	while (batch->pgt_index > 0)
		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
	while (batch->crst_index < RCU_FREELIST_SIZE)
		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
	free_page((unsigned long) batch);
}
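
/*
 * Detach the cpu-local batch and hand it to call_rcu(); the queued
 * tables are freed by rcu_table_freelist_callback once a grace
 * period has passed.
 */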
void rcu_table_freelist_finish(void)
{
	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);

	if (!batch)
		return;
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	__get_cpu_var(rcu_table_freelist) = NULL;
}
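
/*
 * Empty function; waiting for smp_call_function(smp_sync, NULL, 1)
 * to complete guarantees that no other cpu is still inside a
 * section that might dereference a table about to be freed.
 */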
static void smp_sync(void *arg)
{
}
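
/*
 * Sizes for the 31-bit and 64-bit variants: a crst table occupies
 * 1UL << ALLOC_ORDER pages, a 4KB page holds TABLES_PER_PAGE page
 * table fragments of 256 entries each, and FRAG_MASK covers the
 * per-fragment allocation bits kept in the low bits of page->flags.
 */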
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);
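
/*
 * "vmalloc=<size>" early parameter: resize the vmalloc area by
 * moving VMALLOC_START down from the fixed VMALLOC_END.
 */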
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
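
/*
 * Allocate / free the region and segment (crst) tables. A crst
 * table is 1UL << ALLOC_ORDER pages large and holds 2048 entries.
 */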
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}
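
/*
 * Queue a crst table for freeing after an RCU grace period. The
 * table is freed immediately if the mm has no other users and is
 * only attached to this cpu, or if no batch page can be allocated
 * (an empty IPI then synchronizes with concurrent table walkers).
 */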
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		crst_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		crst_table_free(mm, table);
		return;
	}
	batch->table[--batch->crst_index] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

#ifdef CONFIG_64BIT
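/*
 * Grow the address space of an mm by stacking additional region
 * tables on top of the current pgd until context.asce_limit reaches
 * the requested limit (at most 1UL << 53).
 */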
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}
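
/*
 * Shrink the address space again: strip region tables from the top
 * of the tree until context.asce_limit is down to the given limit,
 * freeing each removed table.
 */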
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

/*
 * page table entry allocation/free routines.
 */
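/*
 * A 4KB page supplies TABLES_PER_PAGE fragments of 256 entries; the
 * low bits of page->flags track which fragments are allocated. With
 * pgstes (has_pgste) each table takes two adjacent fragments, the
 * ptes followed by the pgstes. Pages with free fragments are kept
 * on mm->context.pgtable_list.
 */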
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock_bh(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
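
/*
 * Free a page table fragment whose slot bits are encoded in the low
 * bits of the table pointer (as stored by page_table_free_rcu); the
 * page itself is released once no fragment is in use anymore.
 */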
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = ((unsigned long) table) & 15;
	table = (unsigned long *)(((unsigned long) table) ^ bits);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	page->flags ^= bits;
	if (!(page->flags & FRAG_MASK)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
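
/*
 * Free one page table fragment. The containing page stays on
 * mm->context.pgtable_list while other fragments are still in use
 * and is released once all of its fragments are free.
 */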
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		if (!list_empty(&page->lru))
			list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
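
/*
 * Queue a page table fragment for freeing after an RCU grace
 * period; the page is taken off pgtable_list first so that none of
 * its fragments can be reused before the grace period has passed.
 * As with crst_table_free_rcu, the fragment is freed immediately
 * if the mm is only used by this cpu.
 */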
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page;
	unsigned long bits;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		page_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		page_table_free(mm, table);
		return;
	}
	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
	list_del_init(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *)(((unsigned long) table) | bits);
	batch->table[batch->pgt_index++] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

/*
 * Switch on pgstes for the current userspace process (needed for kvm).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* Ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
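/*
 * Check whether a page is still mapped in the kernel address space
 * by probing it with lra (load real address); condition code 0
 * means the translation succeeded.
 */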
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */