[ARM] 4078/1: Fix ARM copypage cache coherency problems
[GitHub/mt8127/android_kernel_alcatel_ttab.git] arch/arm/vfp/vfpmodule.c
/*
 * linux/arch/arm/vfp/vfpmodule.c
 *
 * Copyright (C) 2004 ARM Limited.
 * Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);

void (*vfp_vector)(void) = vfp_testing_entry;
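
/*
 * last_VFP_context tracks which thread's VFP state currently lives in
 * the hardware registers: context switching is lazy, so state is only
 * saved and reloaded when another thread actually uses VFP.
 */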
union vfp_state *last_VFP_context;

/*
 * Dual-use variable.
 * Used in startup: set to non-zero if VFP checks fail.
 * After startup, holds VFP architecture.
 */
unsigned int VFP_arch;

static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	union vfp_state *vfp;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
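		/*
		 * With the enable bit clear, the incoming thread's first
		 * VFP instruction will bounce to vfp_support_entry, which
		 * can then save the old owner's state and load the new
		 * thread's state on demand.
		 */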
		return NOTIFY_DONE;
	}

	vfp = &thread->vfpstate;
	if (cmd == THREAD_NOTIFY_FLUSH) {
		/*
		 * Per-thread VFP initialisation.
		 */
		memset(vfp, 0, sizeof(union vfp_state));

		vfp->hard.fpexc = FPEXC_ENABLE;
		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

		/*
		 * Disable VFP to ensure we initialise it first.
		 */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
	}

	/* flush and release case: Per-thread VFP cleanup. */
	if (last_VFP_context == vfp)
		last_VFP_context = NULL;

	return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
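	/*
	 * The trap frame's PC has already advanced past the faulting
	 * instruction, so step back one word (four bytes, assuming ARM
	 * mode) to report the instruction's own address.
	 */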
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for.
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}

static void vfp_panic(char *reason)
{
	int i;

	printk(KERN_ERR "VFP: Error: %s\n", reason);
	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), fmrx(FPINST));
	for (i = 0; i < 32; i += 2)
		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce");
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat,en,sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;
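
/*
 * For example, the first use below,
 *
 *	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
 *
 * expands to
 *
 *	if (exceptions & FPSCR_DZC && fpscr & FPSCR_DZE)
 *		si_code = FPE_FLTDIV;
 *
 * so a signal is only raised when both the cumulative exception flag
 * and the corresponding trap enable bit are set.
 */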

	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction cannot appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction cannot appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
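	/*
	 * VFP_NAN_FLAG is internal bookkeeping from the emulation
	 * helpers, not a real FPSCR status bit, so mask it off before
	 * the caller interprets the result as exception flags.
	 */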
	return exceptions & ~VFP_NAN_FLAG;
}

/*
 * Package up a bounce condition.
 */
void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, exceptions, inst;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * Enable access to the VFP so we can handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));

	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * If we are running with inexact exceptions enabled, we need to
	 * emulate the trigger instruction.  Note that as we're emulating
	 * the trigger instruction, we need to increment PC.
	 */
	if (fpscr & FPSCR_IXE) {
		regs->ARM_pc += 4;
		goto emulate;
	}

	barrier();

	/*
	 * Modify fpscr to indicate the number of iterations remaining
	 */
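	/*
	 * FPEXC's vector-iteration field holds the remaining iterations
	 * minus one; adding 1 << FPEXC_LENGTH_BIT undoes that bias, and
	 * the shift below moves the value up into FPSCR's LEN field
	 * (assuming the VFP9 register layout described in vfp.h).
	 */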
	if (fpexc & FPEXC_EXCEPTION) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	inst = fmrx(FPINST);
	exceptions = vfp_emulate_instruction(inst, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, inst, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now.
	 */
	if (!(fpexc & FPEXC_FPV2))
		return;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);
	orig_fpscr = fpscr = fmrx(FPSCR);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
	u32 access = 0;

	if (cpu_arch >= CPU_ARCH_ARMv6) {
		access = get_copro_access();

		/*
		 * Enable full access to VFP (cp10 and cp11)
		 */
		set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
	}
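	/*
	 * CPACR allots a two-bit access field to each coprocessor;
	 * CPACC_FULL sets both bits, granting user and kernel access.
	 * VFP occupies coprocessor slots 10 (single) and 11 (double).
	 */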

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfpsid = fmrx(FPSID);

	printk(KERN_INFO "VFP support v0.3: ");
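	/*
	 * A non-zero VFP_arch here means the FPSID read above trapped
	 * and the testing handler flagged the failure: no usable VFP.
	 */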
	if (VFP_arch) {
		printk("not present\n");

		/*
		 * Restore the copro access register.
		 */
		if (cpu_arch >= CPU_ARCH_ARMv6)
			set_copro_access(access);
	} else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		/* Extract the architecture version. */
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
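		/*
		 * As a worked (hypothetical) example, an FPSID of
		 * 0x410120b4 would print: implementor 41 architecture 1
		 * part 20 variant b rev 4.
		 */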

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
	}
	return 0;
}

late_initcall(vfp_init);