From: Anton Blanchard
Date: Tue, 6 Aug 2013 16:01:51 +0000 (+1000)
Subject: powerpc: Make rwlocks endian safe
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=54bb7f4bda0ee49f39dc593c2d73fe6053a99dbb;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

powerpc: Make rwlocks endian safe

Our ppc64 spinlocks and rwlocks use a trick where a lock token and
the paca index are placed in the lock with a single store. Since we
are using two u16s they need adjusting for little endian.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---

diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 17d40a2a00c4..a5954cebbc55 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -68,8 +68,13 @@ struct paca_struct {
 	 * instruction. They must travel together and be properly
 	 * aligned.
 	 */
+#ifdef __BIG_ENDIAN__
 	u16 lock_token;			/* Constant 0x8000, used in locks */
 	u16 paca_index;			/* Logical processor number */
+#else
+	u16 paca_index;			/* Logical processor number */
+	u16 lock_token;			/* Constant 0x8000, used in locks */
+#endif
 
 	u64 kernel_toc;			/* Kernel TOC address */
 	u64 kernelbase;			/* Base address of kernel */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 7c345b6518db..5f54a744dcc5 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -32,8 +32,12 @@
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
 #define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
 #else
+#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
 #define LOCK_TOKEN	1
 #endif
 
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index fc25689a9f35..c3785d4aeed7 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -363,7 +363,11 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
 }
 
+#ifdef __BIG_ENDIAN__
 #define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
+#endif
 
 static inline int try_lock_tlbie(unsigned int *lock)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 20e7fcdc4c95..b93e3cd8bf2b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -393,7 +393,11 @@ toc_tlbie_lock:
 	.tc	native_tlbie_lock[TC],native_tlbie_lock
 	.previous
 	ld	r3,toc_tlbie_lock@toc(2)
+#ifdef __BIG_ENDIAN__
 	lwz	r8,PACA_LOCK_TOKEN(r13)
+#else
+	lwz	r8,PACAPACAINDEX(r13)
+#endif
24:	lwarx	r0,0,r3
 	cmpwi	r0,0
 	bne	24b
@@ -968,7 +972,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
 
 	/* Take the guest's tlbie_lock */
+#ifdef __BIG_ENDIAN__
 	lwz	r8,PACA_LOCK_TOKEN(r13)
+#else
+	lwz	r8,PACAPACAINDEX(r13)
+#endif
 	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
 	cmpwi	r0,0
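
The trick the commit message refers to depends on lock_token (constant 0x8000)
and paca_index being adjacent u16s: the lock code stores and loads them as one
u32, so a held lock reads as 0x800000yy, where yy is the CPU number. Below is a
minimal user-space sketch, not kernel code, of why the field order and the
LOCK_TOKEN base address have to flip on little endian; the struct layouts
mirror the patch, everything else (names, values) is purely illustrative.

/* Illustrative only: the byte layout behind the LOCK_TOKEN trick. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Field order used on big-endian builds (original layout). */
struct paca_be {
	uint16_t lock_token;	/* constant 0x8000 */
	uint16_t paca_index;	/* logical CPU number */
};

/* Field order introduced by this patch for little-endian builds. */
struct paca_le {
	uint16_t paca_index;
	uint16_t lock_token;
};

int main(void)
{
	struct paca_be be = { .lock_token = 0x8000, .paca_index = 0x2a };
	struct paca_le le = { .paca_index = 0x2a, .lock_token = 0x8000 };
	uint32_t tok_be, tok_le;

	/*
	 * The kernel reads the pair with a single 32-bit access
	 * (LOCK_TOKEN / lwz); memcpy stands in for that here.
	 */
	memcpy(&tok_be, &be, sizeof(tok_be));	/* u32 starting at lock_token */
	memcpy(&tok_le, &le, sizeof(tok_le));	/* u32 starting at paca_index */

	/*
	 * On a big-endian host tok_be is 0x8000002a; on a little-endian
	 * host tok_le is 0x8000002a.  Either layout gives the lock word
	 * 0x800000yy the lock paths expect, but only if the u32 is taken
	 * from the right starting field for that endianness.
	 */
	printf("big-endian-order token:    0x%08x\n", (unsigned)tok_be);
	printf("little-endian-order token: 0x%08x\n", (unsigned)tok_le);
	return 0;
}

That is why the little-endian side of the patch both swaps the two fields in
paca_struct and points LOCK_TOKEN (and the assembly's lwz) at paca_index /
PACAPACAINDEX, the first field of the pair, so the same 0x800000yy value ends
up in the lock word on both endiannesses.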