Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __PARISC_UACCESS_H |
2 | #define __PARISC_UACCESS_H | |
3 | ||
4 | /* | |
5 | * User space memory access functions | |
6 | */ | |
1da177e4 | 7 | #include <asm/page.h> |
1da177e4 | 8 | #include <asm/cache.h> |
5b17e1cd | 9 | #include <asm-generic/uaccess-unaligned.h> |
1da177e4 | 10 | |
8dd95c68 | 11 | #include <linux/bug.h> |
aace880f | 12 | #include <linux/string.h> |
1da177e4 LT |
13 | |
14 | #define KERNEL_DS ((mm_segment_t){0}) | |
15 | #define USER_DS ((mm_segment_t){1}) | |
16 | ||
b9762e7b | 17 | #define segment_eq(a, b) ((a).seg == (b).seg) |
1da177e4 LT |
18 | |
19 | #define get_ds() (KERNEL_DS) | |
20 | #define get_fs() (current_thread_info()->addr_limit) | |
21 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | |
22 | ||
23 | /* | |
24 | * Note that since kernel addresses are in a separate address space on | |
e49332bd | 25 | * parisc, we don't need to do anything for access_ok(). |
1da177e4 LT |
26 | * We just let the page fault handler do the right thing. This also means |
27 | * that put_user is the same as __put_user, etc. | |
28 | */ | |
29 | ||
186ecf14 HD |
30 | #define access_ok(type, uaddr, size) \ |
31 | ( (uaddr) == (uaddr) ) | |
1da177e4 | 32 | |
1da177e4 LT |
33 | #define put_user __put_user |
34 | #define get_user __get_user | |
35 | ||
/*
 * 64-bit loads/stores: a 32-bit kernel has no ldd/std, so it splits the
 * access into two 32-bit operations; a 64-bit kernel uses one instruction.
 */
#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif
43 | ||
/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on 64bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
56 | ||
/* Emit one relative extable entry (fault insn, fixup) into __ex_table. */
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zeroes the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
70 | ||
/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;		/* faulting instruction address */
	unsigned long fault_gp;		/* global pointer at fault time */
	unsigned long fault_space;	/* space id of the faulting access */
	unsigned long fault_addr;	/* address that faulted */
};
81 | ||
/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 *
 * The "or,=" nullifies the mfsp when the addr_limit value is 0 (KERNEL_DS),
 * leaving %0 as 0; otherwise %0 is overwritten with %%sr3 before the mtsp.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )
/*
 * Size-dispatched user read. Declares the error register (%r8) that the
 * per-size asm helpers write through the "1" matching constraint, and
 * yields 0 on success or -EFAULT (set by the fixup) on a fault.
 */
#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break;	\
	case 2: __get_user_asm(val, "ldh", ptr); break;	\
	case 4: __get_user_asm(val, "ldw", ptr); break;	\
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

/* Point %%sr2 at the right address space first, then do the access. */
#define __get_user(val, ptr)				\
({							\
	load_sr2();					\
	__get_user_internal(val, ptr);			\
})
115 | ||
/*
 * One guarded load through %%sr2. Label 1 may fault; the EFAULT extable
 * entry makes the fixup land at label 9 with -EFAULT in %r8 (__gu_err)
 * and the destination register zeroed.
 */
#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}
1da177e4 | 128 | |
#if !defined(CONFIG_64BIT)

/*
 * 64-bit read on a 32-bit kernel: two guarded ldw's into a register pair.
 * The union converts the raw 64-bit pattern back to the pointee's type.
 * %R0 (the upper half of the pair) is cleared up front so a fault in
 * either word leaves a fully-zeroed value.
 */
#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */
151 | ||
152 | ||
3f795cef | 153 | #define __put_user_internal(x, ptr) \ |
1da177e4 LT |
154 | ({ \ |
155 | register long __pu_err __asm__ ("r8") = 0; \ | |
156 | __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ | |
157 | \ | |
06bff6b9 | 158 | switch (sizeof(*(ptr))) { \ |
3f795cef HD |
159 | case 1: __put_user_asm("stb", __x, ptr); break; \ |
160 | case 2: __put_user_asm("sth", __x, ptr); break; \ | |
161 | case 4: __put_user_asm("stw", __x, ptr); break; \ | |
162 | case 8: STD_USER(__x, ptr); break; \ | |
163 | default: BUILD_BUG(); \ | |
164 | } \ | |
1da177e4 LT |
165 | \ |
166 | __pu_err; \ | |
167 | }) | |
168 | ||
3f795cef HD |
169 | #define __put_user(x, ptr) \ |
170 | ({ \ | |
171 | load_sr2(); \ | |
172 | __put_user_internal(x, ptr); \ | |
173 | }) | |
174 | ||
175 | ||
1da177e4 LT |
176 | /* |
177 | * The "__put_user/kernel_asm()" macros tell gcc they read from memory | |
178 | * instead of writing. This is because they do not write to any memory | |
3fd3a74f | 179 | * gcc knows about, so there are no aliasing issues. These macros must |
d19f5e41 HD |
180 | * also be aware that fixups are executed in the context of the fault, |
181 | * and any registers used there must be listed as clobbers. | |
182 | * r8 is already listed as err. | |
1da177e4 LT |
183 | */ |
184 | ||
b9762e7b | 185 | #define __put_user_asm(stx, x, ptr) \ |
1da177e4 | 186 | __asm__ __volatile__ ( \ |
d19f5e41 HD |
187 | "1: " stx " %2,0(%%sr2,%1)\n" \ |
188 | "9:\n" \ | |
189 | ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ | |
1da177e4 | 190 | : "=r"(__pu_err) \ |
d19f5e41 | 191 | : "r"(ptr), "r"(x), "0"(__pu_err)) |
1da177e4 | 192 | |
1da177e4 | 193 | |
#if !defined(CONFIG_64BIT)

/*
 * 64-bit write on a 32-bit kernel: two guarded stw's for the low/high
 * halves of the register pair (%2 / %R2); either store may fault.
 */
#define __put_user_asm64(__val, ptr) do {		\
	__asm__ __volatile__ (				\
		"1: stw %2,0(%%sr2,%1)\n"		\
		"2: stw %R2,4(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(__val), "0"(__pu_err));	\
} while (0)

#endif /* !defined(CONFIG_64BIT) */
1da177e4 LT |
208 | |
209 | ||
210 | /* | |
211 | * Complex access routines -- external declarations | |
212 | */ | |
213 | ||
b1195c0e | 214 | extern long strncpy_from_user(char *, const char __user *, long); |
b9762e7b MT |
215 | extern unsigned lclear_user(void __user *, unsigned long); |
216 | extern long lstrnlen_user(const char __user *, long); | |
1da177e4 LT |
217 | /* |
218 | * Complex access routines -- macros | |
219 | */ | |
a0ffa8f0 | 220 | #define user_addr_max() (~0UL) |
1da177e4 | 221 | |
1da177e4 LT |
222 | #define strnlen_user lstrnlen_user |
223 | #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL) | |
224 | #define clear_user lclear_user | |
225 | #define __clear_user lclear_user | |
226 | ||
f64fd180 AV |
227 | unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src, |
228 | unsigned long len); | |
229 | unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src, | |
230 | unsigned long len); | |
231 | unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src, | |
232 | unsigned long len); | |
233 | #define INLINE_COPY_TO_USER | |
234 | #define INLINE_COPY_FROM_USER | |
888c31fc | 235 | |
e448372c | 236 | struct pt_regs; |
c61c25eb KM |
237 | int fixup_exception(struct pt_regs *regs); |
238 | ||
1da177e4 | 239 | #endif /* __PARISC_UACCESS_H */ |