-#ifdef CONFIG_MIPS_MT_SMTC
- mfc0 k0, CP0_TCSTATUS
- LONG_S k0, PT_TCSTATUS(sp)
-#endif
-#ifdef CONFIG_MIPS_MT_SMTC
- LONG_L k0, PT_TCSTATUS(sp)
- mtc0 k0, CP0_TCSTATUS
-#endif
--- /dev/null
+ /*
+ * Copyright (C) 2014 Imagination Technologies Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * PM helper macros for CPU power off (e.g. Suspend-to-RAM).
+ */
+
+ #ifndef __ASM_PM_H
+ #define __ASM_PM_H
+
+ #ifdef __ASSEMBLY__
+
+ #include <asm/asm-offsets.h>
+ #include <asm/asm.h>
+ #include <asm/mipsregs.h>
+ #include <asm/regdef.h>
+
+ /* Save CPU state to stack for suspend to RAM */
+ .macro SUSPEND_SAVE_REGS
+ subu sp, PT_SIZE
+ /* Call preserved GPRs */
+ LONG_S $16, PT_R16(sp)
+ LONG_S $17, PT_R17(sp)
+ LONG_S $18, PT_R18(sp)
+ LONG_S $19, PT_R19(sp)
+ LONG_S $20, PT_R20(sp)
+ LONG_S $21, PT_R21(sp)
+ LONG_S $22, PT_R22(sp)
+ LONG_S $23, PT_R23(sp)
+ LONG_S $28, PT_R28(sp)
+ LONG_S $30, PT_R30(sp)
+ LONG_S $31, PT_R31(sp)
+ /* A couple of CP0 registers with space in pt_regs */
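+ /* k0 is a kernel-reserved scratch register, used here as a temporary */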
+ mfc0 k0, CP0_STATUS
+ LONG_S k0, PT_STATUS(sp)
+ .endm
+
+ /* Restore CPU state from stack after resume from RAM */
+ .macro RESUME_RESTORE_REGS_RETURN
+ .set push
+ .set noreorder
+ /* A couple of CP0 registers with space in pt_regs */
+ LONG_L k0, PT_STATUS(sp)
+ mtc0 k0, CP0_STATUS
+ /* Call preserved GPRs */
+ LONG_L $16, PT_R16(sp)
+ LONG_L $17, PT_R17(sp)
+ LONG_L $18, PT_R18(sp)
+ LONG_L $19, PT_R19(sp)
+ LONG_L $20, PT_R20(sp)
+ LONG_L $21, PT_R21(sp)
+ LONG_L $22, PT_R22(sp)
+ LONG_L $23, PT_R23(sp)
+ LONG_L $28, PT_R28(sp)
+ LONG_L $30, PT_R30(sp)
+ LONG_L $31, PT_R31(sp)
+ /* Pop the frame and return; the addiu runs in the jr delay slot (noreorder) */
+ jr ra
+ addiu sp, PT_SIZE
+ .set pop
+ .endm
+
+ /* Get address of static suspend state into t1 */
+ .macro LA_STATIC_SUSPEND
+ la t1, mips_static_suspend_state
+ .endm
+
+ /* Save important CPU state for early restoration to global data */
+ .macro SUSPEND_SAVE_STATIC
+ #ifdef CONFIG_EVA
+ /*
+ * Segment configuration is saved in global data where it can be easily
+ * reloaded without depending on the segment configuration.
+ */
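+ /* SegCtl0-2 share CP0 register 5 (PageMask) at selects 2-4 */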
+ mfc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
+ LONG_S k0, SSS_SEGCTL0(t1)
+ mfc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
+ LONG_S k0, SSS_SEGCTL1(t1)
+ mfc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
+ LONG_S k0, SSS_SEGCTL2(t1)
+ #endif
+ /* save stack pointer (pointing to GPRs) */
+ LONG_S sp, SSS_SP(t1)
+ .endm
+
+ /* Restore important CPU state early from global data */
+ .macro RESUME_RESTORE_STATIC
+ #ifdef CONFIG_EVA
+ /*
+ * Segment configuration must be restored prior to any access to
+ * allocated memory, as it may reside outside of the legacy kernel
+ * segments.
+ */
+ LONG_L k0, SSS_SEGCTL0(t1)
+ mtc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
+ LONG_L k0, SSS_SEGCTL1(t1)
+ mtc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
+ LONG_L k0, SSS_SEGCTL2(t1)
+ mtc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
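+ /* Clear the mtc0 execution hazard before any dependent memory access */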
+ tlbw_use_hazard
+ #endif
+ /* restore stack pointer (pointing to GPRs) */
+ LONG_L sp, SSS_SP(t1)
+ .endm
+
+ /* Flush caches to make sure the saved context has reached memory */
+ .macro SUSPEND_CACHE_FLUSH
+ .extern __wback_cache_all
+ .set push
+ .set noreorder
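+ /*
+  * __wback_cache_all is a function pointer; load its current value and
+  * call it. The nop fills the jalr delay slot.
+  */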
+ la t1, __wback_cache_all
+ LONG_L t0, 0(t1)
+ jalr t0
+ nop
+ .set pop
+ .endm
+
+ /* Save suspend state and flush data caches to RAM */
+ .macro SUSPEND_SAVE
+ SUSPEND_SAVE_REGS
+ LA_STATIC_SUSPEND
+ SUSPEND_SAVE_STATIC
+ SUSPEND_CACHE_FLUSH
+ .endm
+
+ /* Restore saved state after resume from RAM and return */
+ .macro RESUME_RESTORE_RETURN
+ LA_STATIC_SUSPEND
+ RESUME_RESTORE_STATIC
+ RESUME_RESTORE_REGS_RETURN
+ .endm
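+
+ /*
+  * Illustrative usage (a sketch, not part of this interface): platform
+  * suspend code might pair the macros as below, where
+  * platform_enter_lowpower is a hypothetical helper which actually
+  * enters the low power state, and platform_cpu_resume is reached from
+  * the resume vector. RESUME_RESTORE_RETURN returns to the original
+  * caller of platform_cpu_suspend.
+  *
+  *	LEAF(platform_cpu_suspend)
+  *		SUSPEND_SAVE
+  *		jal	platform_enter_lowpower
+  *		 nop
+  *	END(platform_cpu_suspend)
+  *
+  *	LEAF(platform_cpu_resume)
+  *		RESUME_RESTORE_RETURN
+  *	END(platform_cpu_resume)
+  */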
+
+ #else /* !__ASSEMBLY__ */
+
+ /**
+ * struct mips_static_suspend_state - Core saved CPU state across S2R.
+ * @segctl: CP0 Segment control registers.
+ * @sp: Stack frame where GP register context is saved.
+ *
+ * This structure contains minimal CPU state that must be saved in static kernel
+ * data in order to be able to restore the rest of the state. This includes
+ * the segmentation configuration when EVA is enabled, as it must be
+ * restored prior to any kmalloc'd memory being referenced (even the stack
+ * pointer).
+ */
+ struct mips_static_suspend_state {
+ #ifdef CONFIG_EVA
+ unsigned long segctl[3];
+ #endif
+ unsigned long sp;
+ };
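+
+ /* The SSS_* offsets used by the asm macros above are generated from this
+  * struct by asm-offsets. */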
+
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* __ASM_PM_H */
local_irq_enable();
}
-static void cps_cpus_done(void)
-{
-}
-
+ #ifdef CONFIG_HOTPLUG_CPU
+
+ static int cps_cpu_disable(void)
+ {
+ unsigned cpu = smp_processor_id();
+ struct core_boot_config *core_cfg;
+
+ if (!cpu)
+ return -EBUSY;
+
+ if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+ return -EINVAL;
+
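+ /* Remove this VPE from the core's mask of online VPEs */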
+ core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+ atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
+ smp_mb__after_atomic_dec();
+ set_cpu_online(cpu, false);
+ cpu_clear(cpu, cpu_callin_map);
+
+ return 0;
+ }
+
+ static DECLARE_COMPLETION(cpu_death_chosen);
+ static unsigned cpu_death_sibling;
+ static enum {
+ CPU_DEATH_HALT,
+ CPU_DEATH_POWER,
+ } cpu_death;
+
+ void play_dead(void)
+ {
+ unsigned cpu, core;
+
+ local_irq_disable();
+ idle_task_exit();
+ cpu = smp_processor_id();
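+ /* Default to powering down the whole core */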
+ cpu_death = CPU_DEATH_POWER;
+
+ if (cpu_has_mipsmt) {
+ core = cpu_data[cpu].core;
+
+ /* Look for another online VPE within the core */
+ for_each_online_cpu(cpu_death_sibling) {
+ if (cpu_data[cpu_death_sibling].core != core)
+ continue;
+
+ /*
+ * There is an online VPE within the core. Just halt
+ * this TC and leave the core alone.
+ */
+ cpu_death = CPU_DEATH_HALT;
+ break;
+ }
+ }
+
+ /* This CPU has chosen its way out */
+ complete(&cpu_death_chosen);
+
+ if (cpu_death == CPU_DEATH_HALT) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else {
+ /* Power down the core */
+ cps_pm_enter_state(CPS_PM_POWER_GATED);
+ }
+
+ /* This should never be reached */
+ panic("Failed to offline CPU %u", cpu);
+ }
+
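+ /*
+  * Busy-wait until the target CPU's TC reports halted. Interrupts are
+  * masked around each settc()/read pair, since settc() changes which TC
+  * the TC register accessors target.
+  */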
+ static void wait_for_sibling_halt(void *ptr_cpu)
+ {
+ unsigned cpu = (unsigned long)ptr_cpu;
+ unsigned vpe_id = cpu_data[cpu].vpe_id;
+ unsigned halted;
+ unsigned long flags;
+
+ do {
+ local_irq_save(flags);
+ settc(vpe_id);
+ halted = read_tc_c0_tchalt();
+ local_irq_restore(flags);
+ } while (!(halted & TCHALT_H));
+ }
+
+ static void cps_cpu_die(unsigned int cpu)
+ {
+ unsigned core = cpu_data[cpu].core;
+ unsigned stat;
+ int err;
+
+ /* Wait for the cpu to choose its way out */
+ if (!wait_for_completion_timeout(&cpu_death_chosen,
+ msecs_to_jiffies(5000))) {
+ pr_err("CPU%u: didn't offline\n", cpu);
+ return;
+ }
+
+ /*
+ * Now wait for the CPU to actually go offline. Without this, the
+ * offlining may race with one or more of:
+ *
+ * - Onlining the CPU again.
+ * - Powering down the core if another VPE within it is offlined.
+ * - A sibling VPE entering a non-coherent state.
+ *
+ * In the non-MT halt case (ie. the CPU spinning in an infinite loop)
+ * there is nothing we could race with, so do nothing.
+ */
+ if (cpu_death == CPU_DEATH_POWER) {
+ /*
+ * Wait for the core to enter a powered down or clock gated
+ * state, the latter happening when a JTAG probe is connected
+ * in which case the CPC will refuse to power down the core.
+ */
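+ /*
+  * mips_cpc_lock_other() selects the target core and serialises our
+  * access to the core-other register region.
+  */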
+ do {
+ mips_cpc_lock_other(core);
+ stat = read_cpc_co_stat_conf();
+ stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+ mips_cpc_unlock_other();
+ } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
+ stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
+ stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
+
+ /* Indicate the core is powered off */
+ bitmap_clear(core_power, core, 1);
+ } else if (cpu_has_mipsmt) {
+ /*
+ * Have a CPU with access to the offlined CPUs registers wait
+ * for its TC to halt.
+ */
+ err = smp_call_function_single(cpu_death_sibling,
+ wait_for_sibling_halt,
+ (void *)(unsigned long)cpu, 1);
+ if (err)
+ panic("Failed to call remote sibling CPU\n");
+ }
+ }
+
+ #endif /* CONFIG_HOTPLUG_CPU */
+
static struct plat_smp_ops cps_smp_ops = {
.smp_setup = cps_smp_setup,
.prepare_cpus = cps_prepare_cpus,
.smp_finish = cps_smp_finish,
.send_ipi_single = gic_send_ipi_single,
.send_ipi_mask = gic_send_ipi_mask,
- .cpus_done = cps_cpus_done,
+ #ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cps_cpu_disable,
+ .cpu_die = cps_cpu_die,
+ #endif
};
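+
+ /* Used by eg. PM code to determine whether the CPS SMP ops are in use */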
+ bool mips_cps_smp_in_use(void)
+ {
+ extern struct plat_smp_ops *mp_ops;
+ return mp_ops == &cps_smp_ops;
+ }
+
int register_cps_smp_ops(void)
{
if (!mips_cm_present()) {
} else
set_c0_cause(CAUSEF_IV);
}
-#ifdef CONFIG_MIPS_MT_SMTC
- int secondaryTC = 0;
- int bootTC = (cpu == 0);
-
- /*
- * Only do per_cpu_trap_init() for first TC of Each VPE.
- * Note that this hack assumes that the SMTC init code
- * assigns TCs consecutively and in ascending order.
- */
-
- if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
- ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
- secondaryTC = 1;
-#endif /* CONFIG_MIPS_MT_SMTC */
+ }
+
+ void per_cpu_trap_init(bool is_boot_cpu)
+ {
+ unsigned int cpu = smp_processor_id();
-#ifdef CONFIG_MIPS_MT_SMTC
- if (!secondaryTC) {
-#endif /* CONFIG_MIPS_MT_SMTC */
-
+
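+ /*
+  * CP0 setup is factored into the configure_*() helpers so that it can
+  * also be reused outside boot, eg. by PM resume code.
+  */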
+ configure_status();
+ configure_hwrena();
+
+ configure_exception_vector();
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
-#ifdef CONFIG_MIPS_MT_SMTC
-static pte_t *kmap_coherent_pte;
-static void __init kmap_coherent_init(void)
-{
- unsigned long vaddr;
-
- /* cache the first coherent kmap pte */
- vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
- kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
-}
-#else
-static inline void kmap_coherent_init(void) {}
-#endif
-
- void *kmap_coherent(struct page *page, unsigned long addr)
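+ /* Map @page into the fixmap slot whose colour matches @addr, using @prot */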
+ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
enum fixed_addresses idx;
unsigned long vaddr, flags, entrylo;
pagefault_disable();
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
-#ifdef CONFIG_MIPS_MT_SMTC
- idx += FIX_N_COLOURS * smp_processor_id() +
- (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
-#else
idx += in_interrupt() ? FIX_N_COLOURS : 0;
-#endif
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
- pte = mk_pte(page, PAGE_KERNEL);
+ pte = mk_pte(page, prot);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
entrylo = pte.pte_high;
#else
return (void*) vaddr;
}
+ void *kmap_coherent(struct page *page, unsigned long addr)
+ {
+ return __kmap_pgprot(page, addr, PAGE_KERNEL);
+ }
+
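+ /* As kmap_coherent(), but maps with the non-coherent attribute (PAGE_KERNEL_NC) */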
+ void *kmap_noncoherent(struct page *page, unsigned long addr)
+ {
+ return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
+ }
+
void kunmap_coherent(void)
{
-#ifndef CONFIG_MIPS_MT_SMTC
unsigned int wired;
unsigned long flags, old_ctx;