/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it is folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one pgd,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */
#ifndef __s390x__
#define VMALLOC_START	0x78000000UL
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_START	0x3e000000000UL
#define VMALLOC_END	0x3e040000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)

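/*
 * Worked example (illustrative, not part of the original header),
 * assuming the 64 bit layout above with PAGE_SHIFT = 12 and a 64 byte
 * struct page -- both are assumptions, since sizeof(struct page)
 * depends on the configuration:
 *
 *	VMEM_MAX_PAGES = (0x40000000000 - 0x3e040000000) / 64
 *		       = 0x7f000000
 *	VMEM_MAX_PFN   = min(0x3e000000000 >> 12, 0x7f000000)
 *		       = 0x3e000000
 *	VMEM_MAX_PHYS  = (0x3e000000 << 12) & ~((16 << 20) - 1)
 *		       = 0x3e000000000
 *
 * With these assumptions the 1:1 mapping is limited by VMALLOC_START
 * rather than by the size of the vmemmap array at VMALLOC_END.
 */
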
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event
 * G Segment-Invalid Bit
 * P Private-Space Bit
 * S Storage-Alteration event
 * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                     PFRA                           |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event
 * G Segment-Invalid Bit
 * P Private-Space Bit
 * S Storage-Alteration event
 * R Real space
 * TL Table-Length
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002
/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011; 0001, 0011, 0101 and 0111 are invalid.
 */

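/*
 * Illustrative sketch (not part of this header): reading the i/r/x/t
 * columns of the table above from a raw pte value. The helper name is
 * hypothetical and exists only to make the bit positions concrete.
 */
static inline void pte_type_bits_example(unsigned long pteval,
					 int *i, int *r, int *x, int *t)
{
	*i = (pteval & _PAGE_INVALID) != 0;	/* 0x400 */
	*r = (pteval & _PAGE_RO) != 0;		/* 0x200 */
	*x = (pteval & _PAGE_SWX) != 0;		/* 0x002 */
	*t = (pteval & _PAGE_SWT) != 0;		/* 0x001 */
}
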
/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED	0x02	/* HW changed bit    */
#define _PAGE_REFERENCED 0x04	/* HW referenced bit */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory, execute-only
 * could be implemented with a primary/secondary page table, but it would
 * cost an additional bit in the pte to distinguish all the different pte
 * types. To avoid that, execute permission currently implies read
 * permission as well.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW

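/*
 * Example (illustrative; protection_map is the generic mm table built
 * from __P000..__S111, not something defined here): common code picks
 * the pte protection for a vma as
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 *
 * A private PROT_READ|PROT_WRITE mapping thus indexes __P011 = PAGE_RO
 * and starts out read-only, so the first write faults and is resolved
 * by copy-on-write; the same bits for a shared mapping index
 * __S011 = PAGE_RW and are writable immediately.
 */
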
#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	/* Offset of the entry within its (multi-page) table. */
	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	/* page->index of the table's first page holds the address of the
	 * shadow table, or 0 if no shadow table exists. */
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}

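/*
 * Example (illustrative): with mm->context.noexec set, every page table
 * is followed by a shadow page table at ptep[PTRS_PER_PTE] that backs
 * the secondary address space used for execute protection. Storing an
 * executable pte of type _PAGE_TYPE_EX_RW (valid, _PAGE_SWX set) above
 * mirrors a read-only copy into the shadow table, while a plain
 * _PAGE_TYPE_RW pte is mirrored as _PAGE_TYPE_EMPTY, i.e. data pages
 * are not accessible through the secondary space at all.
 */
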
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	/* Busy-wait on the PCL bit; it acts as a per-pgste spinlock. */
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_dirty for
	 * details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	 * We must *not* clear the *physical* page dirty bit
	 * just because fork() wants to clear the dirty bit in
	 * *one* of the page's mappings. So we just do nothing.
	 */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
	rcp_unlock(ptep);
	return dirty;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB:
	 * on s390 the reference bits are in the storage key and never
	 * in the TLB. With virtualization we handle the reference bit,
	 * without it we can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})

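/*
 * Illustrative sketch (assumed caller, not part of this header) of the
 * common code sequence described above, roughly as it appears in
 * change_pte_range():
 *
 *	pte_t old;
 *
 *	old = ptep_get_and_clear(mm, addr, ptep);	// flushes if live
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *	...
 *	flush_tlb_range(vma, start, end);		// nop on s390
 *
 * Because ptep_get_and_clear already invalidated the pte with ipte
 * whenever another context could see it, the final range flush has
 * nothing left to do.
 */
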
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}

/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"		/* reset reference bit extended */
		"	ipm	%0\n"		/* copy condition code into %0 */
		"	srl	%0,28\n"	/* shift cc down to bits 0-1 */
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;	/* cc 2 or 3: reference bit was set */
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) ({ BUG(); 0UL; })
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

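/*
 * Illustrative sketch (not part of this header): a full software walk
 * from an mm and a virtual address down to the pte slot, relying on the
 * level folding above. On 31 bit, pud_offset/pmd_offset merely cast;
 * on 64 bit they follow the region entry only if that level exists.
 * A real walker would check pgd_none/pud_none/pmd_none before each
 * dereference; this example omits the checks for brevity.
 */
static inline pte_t *pte_walk_example(struct mm_struct *mm,
				      unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}
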
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                         |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

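/*
 * Worked example (illustrative): encoding swap type 3, offset 0x123 on
 * 64 bit. mk_swap_pte puts the type in bits 2-6, the lowest offset bit
 * at bit 7 and the remaining offset bits from bit 11 upwards:
 *
 *	pte_val = _PAGE_TYPE_SWAP | (3 << 2) | ((0x123 & 1UL) << 7)
 *		  | ((0x123 & ~1UL) << 11)
 *		= 0x403 | 0xc | 0x80 | 0x91000 = 0x9148f
 *
 *	__swp_type:   (0x9148f >> 2) & 0x1f		     = 3
 *	__swp_offset: (0x9148f >> 11) | ((0x9148f >> 7) & 1) = 0x123
 */
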
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

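/*
 * Worked example (illustrative): a nonlinear file pte for pgoff 0x81.
 * pgoff_to_pte keeps the low 7 offset bits in pte bits 1-7 and the
 * remaining bits from bit 12 upwards:
 *
 *	pte_val = (((0x81 & 0x7f) << 1) + ((0x81 >> 7) << 12)) | 0x601
 *		= (0x2 + 0x1000) | 0x601 = 0x1603
 *	pte_to_pgoff: ((0x1603 >> 12) << 7) + ((0x1603 >> 1) & 0x7f)
 *		= 0x80 + 0x1 = 0x81
 */
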
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */