From 79270e0a3fd124388a0407f9edbd6ace75eacb69 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Sat, 28 Jan 2017 21:18:40 +0530
Subject: [PATCH] powerpc/mm/hash: Properly mask the ESID bits when building proto VSID

The proto VSID is built using both the MMU context id and the effective
segment ID (ESID). We should not have overlapping bits between those;
that could result in a VSID collision. With the current code we missed
masking the top bits of the ESID. This implies that for kernel addresses
we ended up using the top 4 bits of the ESID as part of the proto VSID,
which is wrong.

The current code uses the top 4 context values (0x7fffc - 0x7ffff) for
the kernel. With those context IDs used for the kernel, we don't run
into VSID collisions, because we get the same proto VSID irrespective of
whether we mask the ESID bits or not. eg:

  ea = 0xf000000000000000
  context = 0x7ffff

  w/out masking:
	proto_vsid = (0x7ffff << 6 | 0xf000000000000000 >> 40)
		   = (0x1ffffc0 | 0xf00000)
		   =  0x1ffffc0

  with masking:
	proto_vsid = (0x7ffff << 6 | ((0xf000000000000000 >> 40) & 0x3f))
		   = (0x1ffffc0 | (0xf00000 & 0x3f))
		   = (0x1ffffc0 | 0)
		   =  0x1ffffc0

So although there is no bug, the code is still overly subtle, so fix it
to save ourselves pain in future.

Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/include/asm/book3s/64/mmu-hash.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 2e6a823fa502..823015cff149 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -525,6 +525,9 @@ extern void slb_set_size(u16 size);
 #define ESID_BITS		18
 #define ESID_BITS_1T		6
 
+#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
+#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)
+
 /*
  * 256MB segment
  * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
@@ -660,9 +663,9 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 
 	if (ssize == MMU_SEGSIZE_256M)
 		return vsid_scramble((context << ESID_BITS)
-				     | (ea >> SID_SHIFT), 256M);
+				     | ((ea >> SID_SHIFT) & ESID_BITS_MASK), 256M);
 	return vsid_scramble((context << ESID_BITS_1T)
-			     | (ea >> SID_SHIFT_1T), 1T);
+			     | ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK), 1T);
 }
 
 /*
-- 
2.20.1
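
To sanity-check the arithmetic in the commit message, here is a minimal
stand-alone user-space sketch (not kernel code) that reproduces the
1T-segment proto-VSID construction with and without the ESID mask, using
the constants this patch introduces. It is only an illustration under
those assumptions: proto_vsid_1t() is a hypothetical helper rather than a
kernel function, the vsid_scramble() step is deliberately omitted to match
the commit message's arithmetic, and a 64-bit (LP64) unsigned long is
assumed.

/*
 * Stand-alone sketch of the 1T-segment proto-VSID arithmetic from the
 * commit message. Constants mirror the patch; proto_vsid_1t() is a
 * hypothetical helper, not a kernel function, and vsid_scramble() is
 * intentionally left out. Assumes 64-bit unsigned long (LP64).
 */
#include <stdio.h>

#define SID_SHIFT_1T		40	/* 1T segment: ESID = ea >> 40 */
#define ESID_BITS_1T		6
#define ESID_BITS_1T_MASK	((1UL << ESID_BITS_1T) - 1)

static unsigned long proto_vsid_1t(unsigned long context, unsigned long ea,
				   int mask_esid)
{
	unsigned long esid = ea >> SID_SHIFT_1T;

	if (mask_esid)
		esid &= ESID_BITS_1T_MASK;

	return (context << ESID_BITS_1T) | esid;
}

int main(void)
{
	unsigned long ea = 0xf000000000000000UL;	/* kernel effective address */
	unsigned long context = 0x7ffff;		/* top kernel context id */

	printf("w/out masking: 0x%lx\n", proto_vsid_1t(context, ea, 0));
	printf("with  masking: 0x%lx\n", proto_vsid_1t(context, ea, 1));
	return 0;
}

Both lines print 0x1ffffc0 for this particular kernel context id, matching
the worked example above; the point of the patch is that the masking makes
that outcome explicit rather than incidental.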