hugetlb: modular state for hugetlb page size
include/asm-s390/hugetlb.h
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range

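/*
 * Store a huge pte. Implemented out of line (see
 * arch/s390/mm/hugetlbpage.c), since the pte has to be converted into
 * a segment table entry before it can be stored.
 */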
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

#define hugetlb_prefault_arch_hook(mm)		do { } while (0)

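/*
 * Huge page (de)allocation hooks, implemented in
 * arch/s390/mm/hugetlbpage.c. On machines without hardware large page
 * support, arch_prepare_hugepage() sets up a regular page table that
 * maps the huge page, which the software emulation in huge_ptep_get()
 * below can then dereference.
 */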
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);

static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}

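/*
 * The write protect bit sits in the same position for ptes and segment
 * table entries (_PAGE_RO and _SEGMENT_ENTRY_RO share the bit), so
 * setting the pte bit also works on the segment entry that actually
 * maps a huge page.
 */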
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

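/*
 * An entry is empty if the invalid bit is set and the read-only bit is
 * not: invalid entries that still carry the read-only bit encode other
 * software states and are therefore not "none".
 */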
static inline int huge_pte_none(pte_t pte)
{
	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
	       !(pte_val(pte) & _SEGMENT_ENTRY_RO);
}

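/*
 * Without hardware large pages (!MACHINE_HAS_HPAGE) the segment entry
 * points to the page table set up by arch_prepare_hugepage(). In that
 * case, reconstruct a huge pte from the first pte of that table and
 * carry over the invalid/read-only bits of the segment entry.
 */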
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			mask = pte_val(pte) &
			       (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}

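/*
 * Read the current value and clear the entry. No TLB flush is done
 * here; that is left to the caller (cf. huge_ptep_clear_flush() below).
 */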
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);

	pmd_clear((pmd_t *) ptep);
	return pte;
}

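/*
 * Invalidate a segment table entry with compare-and-swap-and-purge
 * (csp): atomically replace the entry with its invalid version and
 * purge the associated TLB entries on all CPUs.
 */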
static inline void __pmd_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

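/*
 * Invalidate a segment table entry with idte (invalidate DAT table
 * entry, opcode 0xb98e): takes the segment table origin and the
 * virtual address, invalidates the entry and flushes the matching TLB
 * entries. Only available on machines with MACHINE_HAS_IDTE.
 */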
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

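/*
 * Invalidate a huge pte: use idte if available, csp otherwise. For an
 * mm with the noexec context, the shadow segment table entry has to be
 * invalidated as well.
 */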
static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	if (!MACHINE_HAS_IDTE) {
		__pmd_csp(pmdp);
		if (mm->context.noexec) {
			pmdp = get_shadow_table(pmdp);
			__pmd_csp(pmdp);
		}
		return;
	}

	__pmd_idte(address, pmdp);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		__pmd_idte(address, pmdp);
	}
}

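/*
 * Update an entry after an access or protection fault: only invalidate
 * and rewrite it if it actually changed. Evaluates to whether a change
 * was made.
 */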
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({									     \
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	     \
	if (__changed) {						     \
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	     \
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);    \
	}								     \
	__changed;							     \
})

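/*
 * Write-protect an entry. The flush can be skipped if no other task
 * can have the entry cached in its TLB, i.e. the mm has a single user
 * and is the current active mm.
 */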
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = huge_ptep_get(__ptep);				\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			huge_ptep_invalidate(__mm, __addr, __ptep);	\
		set_huge_pte_at(__mm, __addr, __ptep,			\
				huge_pte_wrprotect(__pte));		\
	}								\
})

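/*
 * Clear a huge pte and flush it from the TLB: huge_ptep_invalidate()
 * both marks the entry invalid and purges the TLB entries.
 */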
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */