#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__

#ifndef __HAVE_ARCH_PTEP_ESTABLISH
/*
 * Establish a new mapping:
 * - flush the old one
 * - update the page tables
 * - inform the TLB about the new one
 *
 * We hold the mm semaphore for reading, and the pte lock.
 *
 * Note: the old pte is known to not be writable, so we don't need to
 * worry about dirty bits etc getting lost.
 */
#define ptep_establish(__vma, __address, __ptep, __entry) \
do { \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
	flush_tlb_page(__vma, __address); \
} while (0)
#endif
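
/*
 * Usage sketch (illustrative only; "new_page" and "page_table" are
 * hypothetical locals, not part of this header): a fault handler that
 * has built a fresh pte for a newly allocated page can install it and
 * invalidate any stale TLB entry in one step:
 *
 *	pte_t entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	ptep_establish(vma, address, page_table, entry);
 *
 * Since the old pte is known not to be writable, no dirty bit can be
 * lost even though the pte write and the TLB flush are two steps.
 */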

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Largely the same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this. We return whether the PTE actually changed, which
 * in turn instructs the caller to do things like update_mmu_cache().
 * This used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})
#endif
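
/*
 * Usage sketch (hypothetical locals, modelled on a minor-fault path):
 * mark the pte young (and dirty on a write fault), then let the return
 * value decide whether update_mmu_cache() needs to run at all:
 *
 *	pte_t entry = pte_mkyoung(*ptep);
 *	if (write_access)
 *		entry = pte_mkdirty(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_access))
 *		update_mmu_cache(vma, address, entry);
 */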

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep) \
({ \
	pte_t __pte = *(__ptep); \
	int r = 1; \
	if (!pte_young(__pte)) \
		r = 0; \
	else \
		set_pte_at((__vma)->vm_mm, (__address), \
			   (__ptep), pte_mkold(__pte)); \
	r; \
})
#endif
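
/*
 * Usage sketch: page reclaim samples and clears the hardware-set
 * "accessed" bit through this interface when judging how recently a
 * page was used, e.g. (hypothetical surrounding code):
 *
 *	if (ptep_test_and_clear_young(vma, address, ptep))
 *		referenced++;
 *
 * No TLB flush is done here; callers that must not leave a stale
 * young bit cached use ptep_clear_flush_young() below instead.
 */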

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
	int __young; \
	__young = ptep_test_and_clear_young(__vma, __address, __ptep); \
	if (__young) \
		flush_tlb_page(__vma, __address); \
	__young; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
({ \
	pte_t __pte = *(__ptep); \
	int r = 1; \
	if (!pte_dirty(__pte)) \
		r = 0; \
	else \
		set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
			   pte_mkclean(__pte)); \
	r; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
({ \
	int __dirty; \
	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
	if (__dirty) \
		flush_tlb_page(__vma, __address); \
	__dirty; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
	pte_t __pte = *(__ptep); \
	pte_clear((__mm), (__address), (__ptep)); \
	__pte; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
({ \
	pte_t __pte; \
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
	__pte; \
})
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or during the destruction of an address space.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
do { \
	pte_clear((__mm), (__address), (__ptep)); \
} while (0)
#endif
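
/*
 * Usage sketch (hypothetical, modelled on a zap-style teardown loop):
 * with __full set, e.g. from tlb->fullmm while the whole mm is being
 * destroyed, swap and file ptes can be cleared without the atomics a
 * live address space would need:
 *
 *	if (unlikely(!pte_present(ptent))) {
 *		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 *		continue;
 *	}
 */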

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep) \
({ \
	pte_t __pte; \
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
	flush_tlb_page(__vma, __address); \
	__pte; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
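
/*
 * Usage sketch (modelled on the fork/copy path; "src_mm", "src_pte"
 * and "vm_flags" are hypothetical locals here): a COW mapping is made
 * read-only in the parent before the pte is copied into the child:
 *
 *	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *		pte = pte_wrprotect(pte);
 *	}
 */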

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)		do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page)	(0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
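
/*
 * Usage sketch (hypothetical loop): a caller already holding the page
 * table lock can bracket a run of pte updates so that a paravirtualized
 * backend may coalesce them into a single batched flush:
 *
 *	arch_enter_lazy_mmu_mode();
 *	do {
 *		ptep_set_wrprotect(mm, addr, pte);
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	arch_leave_lazy_mmu_mode();
 *
 * Per the read-hazard warning above, nothing in the loop may read back
 * a pte that an earlier, still-pending batched write has modified.
 */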

/*
 * A facility to provide batching of the reload of page tables with the
 * actual context switch code for paravirtualized guests. By convention,
 * only one of the lazy modes (CPU, MMU) should be active at any given
 * time, entry should never be nested, and entry and exits should always
 * be paired. This is for sanity of maintaining and reasoning about the
 * kernel code.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)
#define arch_flush_lazy_cpu_mode()	do {} while (0)
#endif
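
/*
 * Usage sketch (hypothetical; the x86-style reloads shown stand in for
 * whatever per-task state an architecture switches): context-switch
 * code wraps the state reloads so a hypervisor can apply them at once:
 *
 *	arch_enter_lazy_cpu_mode();
 *	load_cr3(next->pgd);
 *	load_TLS(next, cpu);
 *	arch_leave_lazy_cpu_mode();
 *
 * Entries must pair with leaves, must not nest, and must not overlap
 * with lazy MMU mode.
 */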

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
#endif
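
/*
 * Usage sketch: a walker computes each step as
 *
 *	unsigned long next = pmd_addr_end(addr, end);
 *
 * and advances with "addr = next" until addr == end. The "- 1" on both
 * sides of the comparison keeps the test correct even when the
 * rounded-up __boundary wraps to 0 at the very top of the address
 * space (a full loop appears after pmd_none_or_clear_bad() below).
 */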

/*
 * When walking page tables, we usually want to skip any p?d_none entries,
 * and any p?d_bad entries - reporting the error before resetting the entry
 * to none. Do the tests inline, but report and clear the bad entry in
 * mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
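
/*
 * Usage sketch (hypothetical walker; walk_pte_range() is a made-up
 * callee): combined with the p?d_addr_end() macros above, these inline
 * tests let a range walker skip holes, and report then clear corrupt
 * entries, without a function call on the common path:
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	do {
 *		unsigned long next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		walk_pte_range(pmd, addr, next);
 *	} while (pmd++, addr = next, addr != end);
 */
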
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */