From: Russell King
Date: Wed, 18 Jul 2007 08:37:10 +0000 (+0100)
Subject: [ARM] vfp: make fpexc bit names less verbose
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=228adef16d6e7b7725ef6b9ba760810d5966afa5;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

[ARM] vfp: make fpexc bit names less verbose

Use the fpexc abbreviated names instead of long verbose names
for fpexc bits.

Signed-off-by: Russell King
---

diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index d4b7b229631d..0ac022f800a1 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -74,14 +74,14 @@ vfp_support_entry:
 
 	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
 	DBGSTR1	"fpexc %08x", r1
-	tst	r1, #FPEXC_ENABLE
+	tst	r1, #FPEXC_EN
 	bne	look_for_VFP_exceptions	@ VFP is already enabled
 
 	DBGSTR1	"enable %x", r10
 	ldr	r3, last_VFP_context_address
-	orr	r1, r1, #FPEXC_ENABLE	@ user FPEXC has the enable bit set
+	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
 	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
-	bic	r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
+	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
 	cmp	r4, r10
 	beq	check_for_exception	@ we are returning to the same
 					@ process, so the registers are
@@ -124,7 +124,7 @@ no_old_VFP_process:
 	VFPFMXR	FPSCR, r5		@ restore status
 
 check_for_exception:
-	tst	r1, #FPEXC_EXCEPTION
+	tst	r1, #FPEXC_EX
 	bne	process_exception	@ might as well handle the pending
 					@ exception before retrying branch
 					@ out before setting an FPEXC that
@@ -136,10 +136,10 @@ check_for_exception:
 
 
 look_for_VFP_exceptions:
-	tst	r1, #FPEXC_EXCEPTION
+	tst	r1, #FPEXC_EX
 	bne	process_exception
 	VFPFMRX	r5, FPSCR
-	tst	r5, #FPSCR_IXE		@ IXE doesn't set FPEXC_EXCEPTION !
+	tst	r5, #FPSCR_IXE		@ IXE doesn't set FPEXC_EX !
 	bne	process_exception
 
 	@ Fall into hand on to next handler - appropriate coproc instr
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 1106b5f9cf19..04ddab2bd876 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -53,7 +53,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 * case the thread migrates to a different CPU. The
 		 * restoring is done lazily.
 		 */
-		if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
+		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
 			vfp_save_state(last_VFP_context[cpu], fpexc);
 			last_VFP_context[cpu]->hard.cpu = cpu;
 		}
@@ -70,7 +70,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 * Always disable VFP so we can lazily save/restore the
 		 * old state.
 		 */
-		fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
+		fmxr(FPEXC, fpexc & ~FPEXC_EN);
 		return NOTIFY_DONE;
 	}
 
@@ -81,13 +81,13 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 */
 		memset(vfp, 0, sizeof(union vfp_state));
 
-		vfp->hard.fpexc = FPEXC_ENABLE;
+		vfp->hard.fpexc = FPEXC_EN;
 		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
 
 		/*
 		 * Disable VFP to ensure we initialise it first.
 		 */
-		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 	}
 
 	/* flush and release case: Per-thread VFP cleanup. */
@@ -229,7 +229,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	/*
 	 * Enable access to the VFP so we can handle the bounce.
 	 */
-	fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
+	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
 
 	orig_fpscr = fpscr = fmrx(FPSCR);
 
@@ -248,7 +248,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	/*
 	 * Modify fpscr to indicate the number of iterations remaining
 	 */
-	if (fpexc & FPEXC_EXCEPTION) {
+	if (fpexc & FPEXC_EX) {
 		u32 len;
 
 		len = fpexc + (1 << FPEXC_LENGTH_BIT);
diff --git a/include/asm-arm/vfp.h b/include/asm-arm/vfp.h
index 14c5e0946c47..bd6be9d7f772 100644
--- a/include/asm-arm/vfp.h
+++ b/include/asm-arm/vfp.h
@@ -26,8 +26,8 @@
 #define FPSID_REV_MASK		(0xF << FPSID_REV_BIT)
 
 /* FPEXC bits */
-#define FPEXC_EXCEPTION		(1<<31)
-#define FPEXC_ENABLE		(1<<30)
+#define FPEXC_EX		(1 << 31)
+#define FPEXC_EN		(1 << 30)
 
 /* FPSCR bits */
 #define FPSCR_DEFAULT_NAN	(1<<25)
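
As a minimal standalone sketch (illustrative only, not part of this patch), the renamed masks behave like any other bit flags. The demo program below is hypothetical and operates on a plain uint32_t; the kernel itself reads and writes FPEXC through the fmrx()/fmxr() accessors shown in vfpmodule.c above.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the renamed FPEXC bit masks in include/asm-arm/vfp.h
 * (1U here to keep the 31-bit shift well-defined in standalone C). */
#define FPEXC_EX	(1U << 31)	/* exception flag, was FPEXC_EXCEPTION */
#define FPEXC_EN	(1U << 30)	/* VFP enable bit, was FPEXC_ENABLE */

int main(void)
{
	uint32_t fpexc = 0;

	fpexc |= FPEXC_EN;	/* set the enable bit, as "orr r1, r1, #FPEXC_EN" does */
	fpexc &= ~FPEXC_EX;	/* clear the exception bit, as "bic r5, r1, #FPEXC_EX" does */

	printf("enabled: %s, exception pending: %s\n",
	       (fpexc & FPEXC_EN) ? "yes" : "no",
	       (fpexc & FPEXC_EX) ? "yes" : "no");
	return 0;
}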