/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff000000UL
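/*
 * Worked example (the high_memory value below is an assumption for
 * illustration, not a fixed platform address): with high_memory at
 * 0xc8000000,
 *   (0xc8000000 + 0x00800000) & ~0x007fffff = 0xc8800000
 * so VMALLOC_START lands on the next 8MB boundary, leaving the 8MB hole
 * described above between lowmem and the vmalloc area.
 */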

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;
extern pgprot_t		pgprot_hyp_device;
extern pgprot_t		pgprot_s2;
extern pgprot_t		pgprot_s2_device;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)	\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
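/*
 * Usage sketch (illustrative, not part of this header): a driver's mmap
 * handler typically applies one of the modifiers above to the vma's
 * protections before remapping. foo_mmap and pfn are hypothetical names:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */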

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
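/*
 * For illustration: the three digits index the xwr bits of the mmap
 * request, and __P* (private) vs __S* (shared) select the COW behaviour.
 * A PROT_READ|PROT_WRITE MAP_PRIVATE mapping looks up __P011 and gets
 * __PAGE_COPY (read-only until the first write fault copies the page),
 * while the MAP_SHARED equivalent looks up __S011 and gets __PAGE_SHARED.
 */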

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
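/*
 * Hedged sketch of a software page-table walk using the helpers above
 * (assumes the caller holds the appropriate locks; error handling elided):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	if (!pmd_none(*pmd)) {
 *		pte_t *pte = pte_offset_map(pmd, addr);
 *		... inspect *pte ...
 *		pte_unmap(pte);
 *	}
 *
 * pud_offset()/pmd_offset() come from the nopud/2-level/3-level headers
 * included above.
 */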

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))
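/*
 * Note on the (u32) test in pte_isset() (explanatory comment, inferred from
 * the code): with LPAE, pte_val() is 64-bit. When the tested flag fits in
 * 32 bits the raw "&" result can be returned directly, but for a flag above
 * bit 31 the !! normalises the result to 0/1 so it survives truncation to
 * an int in the caller.
 */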

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte)	(pte_present(pte) && (pte_val(pte) & L_PTE_USER))

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_present_user(pteval)) {
		__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}
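/*
 * For context (inferred from the code above): present user-space mappings
 * below TASK_SIZE are marked non-global (PTE_EXT_NG) so their TLB entries
 * are tagged with the current ASID, and on ARMv6+ __sync_icache_dcache()
 * keeps the I/D caches coherent before the hardware PTE is written.
 */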

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
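/*
 * For illustration, PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG) expands to:
 *
 *	static inline pte_t pte_mkold(pte_t pte)
 *	{
 *		pte_val(pte) &= ~L_PTE_YOUNG;
 *		return pte;
 *	}
 */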

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------------> < type -> 0 0 0
 *
 * This gives us up to 31 swap files and 64GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
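/*
 * Worked example (illustrative values): type = 2, offset = 0x1234 packs as
 *   (2 << 3) | (0x1234 << 8) = 0x10 | 0x123400 = 0x123410,
 * and the three zero low bits keep the entry distinct from a present PTE
 * (L_PTE_PRESENT clear). __swp_type()/__swp_offset() invert the packing.
 */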

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */