Fix STK_PARAM and use it instead of hardcoding ABIv1 offsets. The ELFv2 ABI shrinks the fixed stack frame header from 48 bytes to 32 (back chain, CR save, LR save and TOC save doublewords), so the parameter save area now starts at offset 32 rather than 48.
Signed-off-by: Anton Blanchard <anton@samba.org>
#define __STK_REG(i) (112 + ((i)-14)*8)
#define STK_REG(i) __STK_REG(__REG_##i)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define __STK_PARAM(i) (32 + ((i)-3)*8)
+#else
#define __STK_PARAM(i) (48 + ((i)-3)*8)
+#endif
#define STK_PARAM(i) __STK_PARAM(__REG_##i)
#if defined(_CALL_ELF) && _CALL_ELF == 2
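For reference (not part of the patch): STK_PARAM(Rn) expands through the __REG_Rn token into a plain frame offset. A minimal userspace C sketch, assuming __REG_Rn is simply n as in ppc_asm.h, showing that the hardcoded 48/56/64 literals removed below are exactly the ABIv1 values for R3/R4/R5, while ELFv2 yields 32/40/48:

    #include <stdio.h>

    /* Assumed to mirror ppc_asm.h: __REG_Rn is just n. */
    #define __REG_R3 3
    #define __REG_R4 4
    #define __REG_R5 5

    #define __STK_PARAM_V1(i) (48 + ((i)-3)*8)  /* ELFv1: 48-byte header */
    #define __STK_PARAM_V2(i) (32 + ((i)-3)*8)  /* ELFv2: 32-byte header */

    #define STK_PARAM_V1(i) __STK_PARAM_V1(__REG_##i)
    #define STK_PARAM_V2(i) __STK_PARAM_V2(__REG_##i)

    int main(void)
    {
        /* ABIv1 matches the literals this patch removes: 48, 56, 64. */
        printf("ELFv1: R3=%d R4=%d R5=%d\n",
               STK_PARAM_V1(R3), STK_PARAM_V1(R4), STK_PARAM_V1(R5));
        /* Under ELFv2 the parameter save area sits 16 bytes lower. */
        printf("ELFv2: R3=%d R4=%d R5=%d\n",
               STK_PARAM_V2(R3), STK_PARAM_V2(R4), STK_PARAM_V2(R5));
        return 0;
    }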
#ifdef CONFIG_ALTIVEC
mflr r0
- std r3,48(r1)
- std r4,56(r1)
+ std r3,STK_PARAM(R3)(r1)
+ std r4,STK_PARAM(R4)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl enter_vmx_copy
cmpwi r3,0
ld r0,STACKFRAMESIZE+16(r1)
- ld r3,STACKFRAMESIZE+48(r1)
- ld r4,STACKFRAMESIZE+56(r1)
+ ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)
+ ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1)
mtlr r0
li r0,(PAGE_SIZE/128)
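The hunk above (copypage_power7.S) shows the pattern used throughout the patch: the volatile argument registers are spilled into the caller's parameter save area before stdu creates a new frame, so after the stdu the same slots are addressed as STACKFRAMESIZE+STK_PARAM(Rn) from the new stack pointer. A small C sketch of that address arithmetic, with a STACKFRAMESIZE value assumed purely for the demo:

    #include <assert.h>
    #include <stdio.h>

    #define STACKFRAMESIZE 256               /* assumed here; defined in ppc_asm.h */
    #define __STK_PARAM(i) (32 + ((i)-3)*8)  /* ELFv2 variant from this patch */

    int main(void)
    {
        unsigned long old_r1 = 0x10000;                  /* pretend stack pointer */
        unsigned long new_r1 = old_r1 - STACKFRAMESIZE;  /* after stdu r1,-STACKFRAMESIZE(r1) */

        /* "std r3,STK_PARAM(R3)(r1)" before the stdu writes here... */
        unsigned long save_slot = old_r1 + __STK_PARAM(3);
        /* ...and "ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)" reads the same slot. */
        unsigned long load_slot = new_r1 + STACKFRAMESIZE + __STK_PARAM(3);

        assert(save_slot == load_slot);
        printf("same slot: 0x%lx\n", save_slot);
        return 0;
    }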
.Lexit:
addi r1,r1,STACKFRAMESIZE
.Ldo_err1:
- ld r3,48(r1)
- ld r4,56(r1)
- ld r5,64(r1)
+ ld r3,STK_PARAM(R3)(r1)
+ ld r4,STK_PARAM(R4)(r1)
+ ld r5,STK_PARAM(R5)(r1)
b __copy_tofrom_user_base
cmpldi r5,16
cmpldi cr1,r5,4096
- std r3,48(r1)
- std r4,56(r1)
- std r5,64(r1)
+ std r3,STK_PARAM(R3)(r1)
+ std r4,STK_PARAM(R4)(r1)
+ std r5,STK_PARAM(R5)(r1)
blt .Lshort_copy
bgt cr1,.Lvmx_copy
#else
cmpldi r5,16
- std r3,48(r1)
- std r4,56(r1)
- std r5,64(r1)
+ std r3,STK_PARAM(R3)(r1)
+ std r4,STK_PARAM(R4)(r1)
+ std r5,STK_PARAM(R5)(r1)
blt .Lshort_copy
#endif
bl enter_vmx_usercopy
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
- ld r3,STACKFRAMESIZE+48(r1)
- ld r4,STACKFRAMESIZE+56(r1)
- ld r5,STACKFRAMESIZE+64(r1)
+ ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)
+ ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1)
+ ld r5,STACKFRAMESIZE+STK_PARAM(R5)(r1)
mtlr r0
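These hunks are from copyuser_power7.S. Saving (to, from, n) on entry also serves the error path: .Ldo_err1 reloads the pristine arguments and falls back to __copy_tofrom_user_base when the VMX path cannot be used. A rough C analogue of that structure, with illustrative stand-in names that are not the kernel's:

    #include <string.h>

    static int vmx_usable(void) { return 0; }   /* the enter_vmx_usercopy == 0 case */
    static unsigned long copy_base(void *to, const void *from, unsigned long n)
    {
        memcpy(to, from, n);                    /* stands in for __copy_tofrom_user_base */
        return 0;                               /* 0 bytes left uncopied */
    }

    unsigned long copy_sketch(void *to, const void *from, unsigned long n)
    {
        /* The std r3/r4/r5 above: keep the original arguments so the
         * fallback can always restart from scratch. */
        void *saved_to = to;
        const void *saved_from = from;
        unsigned long saved_n = n;

        if (!vmx_usable())
            return copy_base(saved_to, saved_from, saved_n);
        /* ...VMX copy path would go here... */
        return 0;
    }

    int main(void)
    {
        char dst[8];
        return (int)copy_sketch(dst, "powerpc", 8);
    }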
/*
.align 7
_GLOBAL(memcpy)
BEGIN_FTR_SECTION
- std r3,48(r1) /* save destination pointer for return value */
+ std r3,STK_PARAM(R3)(r1) /* save destination pointer for return value */
FTR_SECTION_ELSE
#ifndef SELFTEST
b memcpy_power7
2: bf cr7*4+3,3f
lbz r9,8(r4)
stb r9,0(r3)
-3: ld r3,48(r1) /* return dest pointer */
+3: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */
blr
.Lsrc_unaligned:
2: bf cr7*4+3,3f
rotldi r9,r9,8
stb r9,0(r3)
-3: ld r3,48(r1) /* return dest pointer */
+3: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */
blr
.Ldst_unaligned:
3: bf cr7*4+3,4f
lbz r0,0(r4)
stb r0,0(r3)
-4: ld r3,48(r1) /* return dest pointer */
+4: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */
blr
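The memcpy_64.S hunks above all touch the same slot: r3 carries the destination in and the return value out, but the copy loop advances it, so the entry value is saved once and reloaded just before each blr. In rough C terms:

    #include <stdio.h>

    /* C analogue of why memcpy_64.S saves r3: the destination argument
     * doubles as the return value, yet the loop clobbers the register
     * holding it. */
    static void *memcpy_sketch(void *dest, const void *src, unsigned long n)
    {
        unsigned char *d = dest;        /* std r3,STK_PARAM(R3)(r1) keeps the original */
        const unsigned char *s = src;

        while (n--)
            *d++ = *s++;                /* d walks forward, like r3 in the copy loop */

        return dest;                    /* ld r3,STK_PARAM(R3)(r1) before blr */
    }

    int main(void)
    {
        char buf[8];
        printf("%s\n", (char *)memcpy_sketch(buf, "ppc64le", 8));
        return 0;
    }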
cmpldi r5,16
cmpldi cr1,r5,4096
- std r3,48(r1)
+ std r3,STK_PARAM(R3)(r1)
blt .Lshort_copy
bgt cr1,.Lvmx_copy
#else
cmpldi r5,16
- std r3,48(r1)
+ std r3,STK_PARAM(R3)(r1)
blt .Lshort_copy
#endif
lbz r0,0(r4)
stb r0,0(r3)
-15: ld r3,48(r1)
+15: ld r3,STK_PARAM(R3)(r1)
blr
.Lunwind_stack_nonvmx_copy:
#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
mflr r0
- std r4,56(r1)
- std r5,64(r1)
+ std r4,STK_PARAM(R4)(r1)
+ std r5,STK_PARAM(R5)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl enter_vmx_copy
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
- ld r3,STACKFRAMESIZE+48(r1)
- ld r4,STACKFRAMESIZE+56(r1)
- ld r5,STACKFRAMESIZE+64(r1)
+ ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)
+ ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1)
+ ld r5,STACKFRAMESIZE+STK_PARAM(R5)(r1)
mtlr r0
/*
stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
- ld r3,48(r1)
+ ld r3,STK_PARAM(R3)(r1)
b exit_vmx_copy /* tail call optimise */
.Lvmx_unaligned_copy:
stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
- ld r3,48(r1)
+ ld r3,STK_PARAM(R3)(r1)
b exit_vmx_copy /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
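Finally, the memcpy_power7.S exit paths restore the saved destination into r3 and then branch (b, not bl) to exit_vmx_copy, so exit_vmx_copy returns straight to memcpy's caller with r3 already holding the return value. A C sketch of that tail-call shape; the stub name and its pass-through behaviour are assumptions for the sketch:

    /* Stand-in for exit_vmx_copy; assumed here to hand back its dest
     * argument unchanged. */
    static void *exit_vmx_copy_stub(void *dest)
    {
        return dest;
    }

    void *vmx_copy_exit_sketch(void *dest)
    {
        /* A tail call: the compiler can emit a plain jump, matching
         * "b exit_vmx_copy" with the caller's LR left intact. */
        return exit_vmx_copy_stub(dest);
    }

    int main(void)
    {
        char page[1];
        return vmx_copy_exit_sketch(page) == page ? 0 : 1;
    }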