From bbeeffec6f14a04454906032e9322538535030dc Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 19 Apr 2016 09:25:07 +0100
Subject: [PATCH] MIPS: mm: Pass scratch register through to iPTE_SW

Rather than hardcode a scratch register for the XPA case in iPTE_SW,
pass one through from the work registers allocated by the caller. This
allows for the XPA path to function correctly regardless of the work
registers in use. Without doing this there are cases (where KScratch
registers are unavailable) in which iPTE_SW will incorrectly clobber $1
despite it already being in use for the PTE or PTE pointer.

Signed-off-by: Paul Burton
Reviewed-by: James Hogan
Cc: Paul Gortmaker
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13121/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/tlbex.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 81d42c33f882..2e9b43637743 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1639,14 +1639,12 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 
 static void
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
-	unsigned int mode)
+	unsigned int mode, unsigned int scratch)
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
 
 	if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
-		const int scratch = 1; /* Our extra working register */
-
 		uasm_i_lui(p, scratch, (mode >> 16));
 		uasm_i_or(p, pte, pte, scratch);
 	} else
@@ -1743,11 +1741,11 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 
 /* Make PTE valid, store result in PTR. */
 static void build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
-			     unsigned int ptr)
+			     unsigned int ptr, unsigned int scratch)
 {
 	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
 
-	iPTE_SW(p, r, pte, ptr, mode);
+	iPTE_SW(p, r, pte, ptr, mode, scratch);
 }
 
 /*
@@ -1783,12 +1781,12 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
  */
 static void build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
-			     unsigned int ptr)
+			     unsigned int ptr, unsigned int scratch)
 {
 	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID |
 			     _PAGE_DIRTY);
 
-	iPTE_SW(p, r, pte, ptr, mode);
+	iPTE_SW(p, r, pte, ptr, mode, scratch);
 }
 
 /*
@@ -1893,7 +1891,7 @@ static void build_r3000_tlb_load_handler(void)
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
 	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
 	uasm_i_nop(&p); /* load delay */
-	build_make_valid(&p, &r, K0, K1);
+	build_make_valid(&p, &r, K0, K1, -1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
 
 	uasm_l_nopage_tlbl(&l, p);
@@ -1924,7 +1922,7 @@ static void build_r3000_tlb_store_handler(void)
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
 	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
 	uasm_i_nop(&p); /* load delay */
-	build_make_write(&p, &r, K0, K1);
+	build_make_write(&p, &r, K0, K1, -1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
 
 	uasm_l_nopage_tlbs(&l, p);
@@ -1955,7 +1953,7 @@ static void build_r3000_tlb_modify_handler(void)
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
 	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
 	uasm_i_nop(&p); /* load delay */
-	build_make_write(&p, &r, K0, K1);
+	build_make_write(&p, &r, K0, K1, -1);
 	build_r3000_pte_reload_tlbwi(&p, K0, K1);
 
 	uasm_l_nopage_tlbm(&l, p);
@@ -2123,7 +2121,7 @@ static void build_r4000_tlb_load_handler(void)
 		}
 		uasm_l_tlbl_goaround1(&l, p);
 	}
-	build_make_valid(&p, &r, wr.r1, wr.r2);
+	build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2237,7 +2235,7 @@ static void build_r4000_tlb_store_handler(void)
 	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
-	build_make_write(&p, &r, wr.r1, wr.r2);
+	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2293,7 +2291,7 @@ static void build_r4000_tlb_modify_handler(void)
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	/* Present and writable bits set, set accessed and dirty bits. */
-	build_make_write(&p, &r, wr.r1, wr.r2);
+	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-- 
2.20.1