std r6,PACACURRENT(r13) /* Set new 'current' */
ld r8,KSP(r4) /* new stack pointer */
-#ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_PPC_STD_MMU_64
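+ /* Radix has no SLB; skip the kernel stack SLB entry setup below */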
+BEGIN_MMU_FTR_SECTION
+ b 2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
BEGIN_FTR_SECTION
clrrdi r6,r8,28 /* get its ESID */
clrrdi r9,r1,28 /* get current sp ESID */
slbmte r7,r0
isync
2:
-#endif /* !CONFIG_PPC_BOOK3S */
+#endif /* CONFIG_PPC_STD_MMU_64 */
CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
li r5,0x300
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
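+ /* hash MMU: try to insert a HPTE first; radix: go straight to handle_page_fault */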
+BEGIN_MMU_FTR_SECTION
b do_hash_page /* Try to handle as hpte fault */
+MMU_FTR_SECTION_ELSE
+ b handle_page_fault
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
.align 7
.globl h_data_storage_common
ld r3,_NIP(r1)
andis. r4,r12,0x5820
li r5,0x400
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+BEGIN_MMU_FTR_SECTION
b do_hash_page /* Try to handle as hpte fault */
+MMU_FTR_SECTION_ELSE
+ b handle_page_fault
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+#ifdef CONFIG_PPC_STD_MMU_64
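+ /* Only the hash MMU has an SLB to populate; radix skips the allocation */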
+BEGIN_MMU_FTR_SECTION
bl slb_allocate_realmode
-
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
+#endif
/* All done -- return from exception. */
ld r10,PACA_EXSLB+EX_LR(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
-
+BEGIN_MMU_FTR_SECTION
+ b 2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- 2f
*/
.align 7
do_hash_page:
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
-
+#ifdef CONFIG_PPC_STD_MMU_64
andis. r0,r4,0xa410 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
andis. r0,r4,DSISR_DABRMATCH@h
/* Error */
blt- 13f
+#endif /* CONFIG_PPC_STD_MMU_64 */
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
12: b ret_from_except_lite
+#ifdef CONFIG_PPC_STD_MMU_64
/* We have a page fault that hash_page could handle but HV refused
* the PTE insertion
*/
ld r4,_DAR(r1)
bl low_hash_fault
b ret_from_except
+#endif
/*
* We come here as a result of a DSI at a point where we don't want
* end of the blocked region (begin >= high). Use the
* boolean identity !(a || b) === (!a && !b).
*/
+#ifdef CONFIG_PPC_STD_MMU_64
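+ /* Only the hash MMU has a hash page table whose memory must be preserved */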
if (htab_address) {
low = __pa(htab_address);
high = low + htab_size_bytes;
return -ETXTBSY;
}
}
+#endif /* CONFIG_PPC_STD_MMU_64 */
/* We also should not overwrite the tce tables */
for_each_node_by_type(node, "pci") {
/* NOTREACHED */
}
-#ifndef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_STD_MMU_64
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
static unsigned long htab_size;
return 0;
}
late_initcall(export_htab_values);
-#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_STD_MMU_64 */
/* flush SLBs and reload */
+#ifdef CONFIG_PPC_STD_MMU_64
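+/* The SLB exists only with the hash MMU; radix has no SLB to recover */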
static void flush_and_reload_slb(void)
{
struct slb_shadow *slb;
asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
}
}
+#endif
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
{
* reset the error bits whenever we handle them so that at the end
* we can check whether we handled all of them or not.
* */
+#ifdef CONFIG_PPC_STD_MMU_64
if (dsisr & slb_error_bits) {
flush_and_reload_slb();
/* reset error bits */
dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
}
+#endif
/* Any other errors we don't understand? */
if (dsisr & 0xffffffffUL)
handled = 0;
switch (P7_SRR1_MC_IFETCH(srr1)) {
case 0:
break;
+#ifdef CONFIG_PPC_STD_MMU_64
case P7_SRR1_MC_IFETCH_SLB_PARITY:
case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
/* flush and reload SLBs for SLB errors. */
handled = 1;
}
break;
+#endif
default:
break;
}
handled = mce_handle_common_ierror(srr1);
+#ifdef CONFIG_PPC_STD_MMU_64
if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
flush_and_reload_slb();
handled = 1;
}
+#endif
return handled;
}
handled = mce_handle_common_ierror(srr1);
+#ifdef CONFIG_PPC_STD_MMU_64
if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
flush_and_reload_slb();
handled = 1;
}
+#endif
return handled;
}
}
#endif /* CONFIG_PPC64 */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_STD_MMU_64
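+ /* The hash MMU batches HPTE invalidations in ppc64_tlb_batch; radix does not use it */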
batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->active) {
current_thread_info()->local_flags |= _TLF_LAZY_MMU;
__flush_tlb_pending(batch);
batch->active = 0;
}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
switch_booke_debug_regs(&new->thread.debug);
last = _switch(old_thread, new_thread);
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_STD_MMU_64
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
batch = this_cpu_ptr(&ppc64_tlb_batch);
if (current_thread_info()->task->thread.regs)
restore_math(current_thread_info()->task->thread.regs);
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_STD_MMU_64 */
return last;
}
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
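+ /* radix does not use the SLB, so there is no kernel stack VSID to set up */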
+ if (radix_enabled())
+ return;
+
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
<< SLB_VSID_SHIFT_1T;
* the heap, we can put it above 1TB so it is backed by a 1TB
* segment. Otherwise the heap will be in the bottom 1TB
* which always uses 256MB segments and this may result in a
- * performance penalty.
+ * performance penalty. We don't need to worry about radix here:
+ * with radix, mmu_highuser_ssize stays at the default 256MB.
*/
if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
printf("%s", after);
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_STD_MMU_64
void dump_segments(void)
{
int i;