[MIPS] MT: Improved multithreading support.
authorRalf Baechle <ralf@linux-mips.org>
Wed, 5 Apr 2006 08:45:45 +0000 (09:45 +0100)
committerRalf Baechle <ralf@linux-mips.org>
Wed, 19 Apr 2006 02:14:28 +0000 (04:14 +0200)
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
52 files changed:
arch/mips/Kconfig
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/entry.S
arch/mips/kernel/gdb-low.S
arch/mips/kernel/gdb-stub.c
arch/mips/kernel/genex.S
arch/mips/kernel/head.S
arch/mips/kernel/i8259.c
arch/mips/kernel/irq-msc01.c
arch/mips/kernel/irq.c
arch/mips/kernel/mips-mt.c [new file with mode: 0644]
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/ptrace32.c
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/smp-mt.c [new file with mode: 0644]
arch/mips/kernel/smp.c
arch/mips/kernel/smp_mt.c [deleted file]
arch/mips/kernel/smtc-asm.S [new file with mode: 0644]
arch/mips/kernel/smtc-proc.c [new file with mode: 0644]
arch/mips/kernel/smtc.c [new file with mode: 0644]
arch/mips/kernel/time.c
arch/mips/kernel/traps.c
arch/mips/kernel/vmlinux.lds.S
arch/mips/mips-boards/generic/init.c
arch/mips/mips-boards/generic/time.c
arch/mips/mips-boards/malta/Makefile
arch/mips/mips-boards/malta/malta_int.c
arch/mips/mips-boards/malta/malta_smp.c [new file with mode: 0644]
arch/mips/mips-boards/sim/cmdline.c [deleted file]
arch/mips/mips-boards/sim/sim_cmdline.c
arch/mips/mips-boards/sim/sim_smp.c
arch/mips/mm/fault.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
include/asm-mips/asmmacro.h
include/asm-mips/cpu-info.h
include/asm-mips/hazards.h
include/asm-mips/interrupt.h
include/asm-mips/irq.h
include/asm-mips/mips_mt.h [new file with mode: 0644]
include/asm-mips/mipsmtregs.h
include/asm-mips/mipsregs.h
include/asm-mips/mmu_context.h
include/asm-mips/processor.h
include/asm-mips/ptrace.h
include/asm-mips/r4kcache.h
include/asm-mips/smtc.h [new file with mode: 0644]
include/asm-mips/smtc_ipi.h [new file with mode: 0644]
include/asm-mips/smtc_proc.h [new file with mode: 0644]
include/asm-mips/stackframe.h

index a7bac0459f99f0b317251d07f88e4035138f7ab3..f9be549645eaf254dcb2fabd1fa29f886ffeb345 100644 (file)
@@ -1447,6 +1447,10 @@ choice
        prompt "MIPS MT options"
        depends on MIPS_MT
 
+config MIPS_MT_SMTC
+       bool "SMTC: Use all TCs on all VPEs for SMP"
+       select SMP
+
 config MIPS_MT_SMP
        bool "Use 1 TC on each available VPE for SMP"
        select SMP
@@ -1613,7 +1617,7 @@ source "mm/Kconfig"
 
 config SMP
        bool "Multi-Processing support"
-       depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP
+       depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC
        ---help---
          This enables support for systems with more than one CPU. If you have
          a system with only one CPU, like most personal computers, say N. If
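Note: with the new choice entry above, an SMTC kernel configuration would contain roughly the following fragment (illustrative sketch only; CONFIG_SMP=y follows from the "select SMP" line):

    CONFIG_MIPS_MT=y
    CONFIG_MIPS_MT_SMTC=y
    # CONFIG_MIPS_MT_SMP is not set
    CONFIG_SMP=y
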
index 9ec01de81c04f0907fa8d6d56f88a926eef19d41..34e8a256765c3f2a811e6c6edc701ccb6544b032 100644 (file)
@@ -34,7 +34,9 @@ obj-$(CONFIG_CPU_R6000)               += r6000_fpu.o r4k_switch.o
 
 obj-$(CONFIG_SMP)              += smp.o
 
-obj-$(CONFIG_MIPS_MT_SMP)      += smp_mt.o
+obj-$(CONFIG_MIPS_MT)          += mips-mt.o
+obj-$(CONFIG_MIPS_MT_SMTC)     += smtc.o smtc-asm.o smtc-proc.o
+obj-$(CONFIG_MIPS_MT_SMP)      += smp-mt.o
 
 obj-$(CONFIG_MIPS_APSP_KSPD)   += kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER)  += vpe.o
index ca6b03c773be3b2d7da5578b2d8da7838a7c19b0..92b28b674d6f615be8ab8894ea117e06018f44d1 100644 (file)
@@ -69,6 +69,9 @@ void output_ptreg_defines(void)
        offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
        offset("#define PT_STATUS ", struct pt_regs, cp0_status);
        offset("#define PT_CAUSE  ", struct pt_regs, cp0_cause);
+#ifdef CONFIG_MIPS_MT_SMTC
+       offset("#define PT_TCSTATUS  ", struct pt_regs, cp0_tcstatus);
+#endif /* CONFIG_MIPS_MT_SMTC */
        size("#define PT_SIZE   ", struct pt_regs);
        linefeed;
 }
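Note: the new PT_TCSTATUS offset presumes that struct pt_regs grows a per-TC save slot when SMTC is configured, roughly as sketched below (the actual field is added in include/asm-mips/ptrace.h, listed among the changed files; only the cp0_tcstatus name is taken from this hunk):

    struct pt_regs {
            ...
            unsigned long cp0_cause;
    #ifdef CONFIG_MIPS_MT_SMTC
            unsigned long cp0_tcstatus;     /* TCStatus saved on exception entry */
    #endif
            ...
    };
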
index b1939a486d2c586c82f77caf05e3e84eccd0611a..d101d2fb24caab6f8283647a20a5c37768f30c05 100644 (file)
@@ -17,6 +17,9 @@
 #include <asm/isadep.h>
 #include <asm/thread_info.h>
 #include <asm/war.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif
 
 #ifdef CONFIG_PREEMPT
        .macro  preempt_stop
@@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
        bnez    t0, syscall_exit_work
 
 FEXPORT(restore_all)                   # restore full frame
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Detect and execute deferred IPI "interrupts" */
+       move    a0,sp
+       jal     deferred_smtc_ipi
+/* Re-arm any temporarily masked interrupts not explicitly "acked" */
+       mfc0    v0, CP0_TCSTATUS
+       ori     v1, v0, TCSTATUS_IXMT
+       mtc0    v1, CP0_TCSTATUS
+       andi    v0, TCSTATUS_IXMT
+       ehb
+       mfc0    t0, CP0_TCCONTEXT
+       DMT     9                               # dmt t1
+       jal     mips_ihb
+       mfc0    t2, CP0_STATUS
+       andi    t3, t0, 0xff00
+       or      t2, t2, t3
+       mtc0    t2, CP0_STATUS
+       ehb
+       andi    t1, t1, VPECONTROL_TE
+       beqz    t1, 1f
+       EMT
+1:
+       mfc0    v1, CP0_TCSTATUS
+       /* We set IXMT above; XOR should clear it here */
+       xori    v1, v1, TCSTATUS_IXMT
+       or      v1, v0, v1
+       mtc0    v1, CP0_TCSTATUS
+       ehb
+       xor     t0, t0, t3
+       mtc0    t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC */
        .set    noat
        RESTORE_TEMP
        RESTORE_AT
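Note: in C-like pseudocode, the SMTC epilogue inserted at restore_all above amounts to the following (illustrative sketch only; the real sequence runs with this TC interrupt-exempt and with the other TCs briefly stopped via DMT/mips_ihb):

    deferred_smtc_ipi(regs);                      /* replay IPIs queued while masked */
    saved_ixmt = TCStatus & TCSTATUS_IXMT;        /* remember exemption state ...    */
    TCStatus  |= TCSTATUS_IXMT;                   /* ... and stay exempt for now     */
    Status    |= TCContext & 0xff00;              /* re-arm IM bits parked in TCContext */
    TCStatus   = (TCStatus & ~TCSTATUS_IXMT) | saved_ixmt;   /* restore exemption    */
    TCContext &= ~0xff00;                         /* parked bits now re-armed        */
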
index 235ad9f6bd350595318335171a2ab4e2db5b72d5..10f28fb9f008a92fc89b68c89734233297e8472b 100644 (file)
  */
 
 3:
+#ifdef CONFIG_MIPS_MT_SMTC
+               /* Read-modify-write of Status must be atomic */
+               mfc0    t2, CP0_TCSTATUS
+               ori     t1, t2, TCSTATUS_IXMT
+               mtc0    t1, CP0_TCSTATUS
+               andi    t2, t2, TCSTATUS_IXMT
+               ehb
+               DMT     9                               # dmt   t1
+               jal     mips_ihb
+               nop
+#endif /* CONFIG_MIPS_MT_SMTC */
                mfc0    t0, CP0_STATUS
                ori     t0, 0x1f
                xori    t0, 0x1f
                mtc0    t0, CP0_STATUS
-
+#ifdef CONFIG_MIPS_MT_SMTC
+               andi    t1, t1, VPECONTROL_TE
+               beqz    t1, 9f
+               nop
+               EMT                                     # emt
+9:
+               mfc0    t1, CP0_TCSTATUS
+               xori    t1, t1, TCSTATUS_IXMT
+               or      t1, t1, t2
+               mtc0    t1, CP0_TCSTATUS
+               ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
                LONG_L  v0, GDB_FR_STATUS(sp)
                LONG_L  v1, GDB_FR_EPC(sp)
                mtc0    v0, CP0_STATUS
index d4f88e0af24c04fcb51271d50414242418f96ded..6ecbdc1fefd13acdd930998a07c19e633aaab354 100644 (file)
 #include <asm/system.h>
 #include <asm/gdb-stub.h>
 #include <asm/inst.h>
+#include <asm/smp.h>
 
 /*
  * external low-level support routines
@@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
        local_irq_restore(flags);
 }
 
+/*
+ * The GDB stub needs to call kgdb_wait on all processors with interrupts
+ * disabled, so it uses its own special variant.
+ */
+static int kgdb_smp_call_kgdb_wait(void)
+{
+#ifdef CONFIG_SMP
+       struct call_data_struct data;
+       int i, cpus = num_online_cpus() - 1;
+       int cpu = smp_processor_id();
+
+       /*
+        * Can die spectacularly if this CPU isn't yet marked online
+        */
+       BUG_ON(!cpu_online(cpu));
+
+       if (!cpus)
+               return 0;
+
+       if (spin_is_locked(&smp_call_lock)) {
+               /*
+                * Some other processor is trying to make us do something
+                * but we're not going to respond... give up
+                */
+               return -1;
+       }
+
+       /*
+        * We will continue here, accepting the fact that
+        * the kernel may deadlock if another CPU attempts
+        * to call smp_call_function now...
+        */
+
+       data.func = kgdb_wait;
+       data.info = NULL;
+       atomic_set(&data.started, 0);
+       data.wait = 0;
+
+       spin_lock(&smp_call_lock);
+       call_data = &data;
+       mb();
+
+       /* Send a message to all other CPUs and wait for them to respond */
+       for (i = 0; i < NR_CPUS; i++)
+               if (cpu_online(i) && i != cpu)
+                       core_send_ipi(i, SMP_CALL_FUNCTION);
+
+       /* Wait for response */
+       /* FIXME: lock-up detection, backtrace on lock-up */
+       while (atomic_read(&data.started) != cpus)
+               barrier();
+
+       call_data = NULL;
+       spin_unlock(&smp_call_lock);
+#endif
+
+       return 0;
+}
 
 /*
  * This function does all command processing for interfacing to gdb.  It
@@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
        /*
         * force other cpus to enter kgdb
         */
-       smp_call_function(kgdb_wait, NULL, 0, 0);
+       kgdb_smp_call_kgdb_wait();
 
        /*
         * If we're in breakpoint() increment the PC
index 04418b6568b0fda36ccc7cb784827d4ee7a3a7e2..ff7af369f2862eedd4e8fbf08eb6eb3adf263010 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/cacheops.h>
 #include <asm/regdef.h>
 #include <asm/fpregdef.h>
@@ -171,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
        SAVE_AT
        .set    push
        .set    noreorder
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * To keep from blindly blocking *all* interrupts
+        * during service by SMTC kernel, we also want to
+        * pass the IM value to be cleared.
+        */
+EXPORT(except_vec_vi_mori)
+       ori     a0, $0, 0
+#endif /* CONFIG_MIPS_MT_SMTC */
 EXPORT(except_vec_vi_lui)
        lui     v0, 0           /* Patched */
        j       except_vec_vi_handler
@@ -187,6 +197,25 @@ EXPORT(except_vec_vi_end)
 NESTED(except_vec_vi_handler, 0, sp)
        SAVE_TEMP
        SAVE_STATIC
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC has an interesting problem that interrupts are level-triggered,
+        * and the CLI macro will clear EXL, potentially causing a duplicate
+        * interrupt service invocation. So we need to clear the associated
+        * IM bit of Status prior to doing CLI, and restore it after the
+        * service routine has been invoked - we must assume that the
+        * service routine will have cleared the state, and any active
+        * level represents a new or otherwise unserviced event...
+        */
+       mfc0    t1, CP0_STATUS
+       and     t0, a0, t1
+       mfc0    t2, CP0_TCCONTEXT
+       or      t0, t0, t2
+       mtc0    t0, CP0_TCCONTEXT
+       xor     t1, t1, t0
+       mtc0    t1, CP0_STATUS
+       ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
        CLI
        move    a0, sp
        jalr    v0
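Note: stated as pseudocode, the manipulation above performed before CLI is roughly (sketch; a0 holds the IM bits patched in at except_vec_vi_mori):

    under_service = (a0 & Status) | TCContext;   /* IM bits now under service        */
    TCContext     = under_service;               /* park them for restore_all        */
    Status       ^= under_service;               /* drop them from Status for now    */

restore_all in entry.S (see above) later ORs the parked bits back into Status and clears TCContext.
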
index 2e9122a4213a2215480637903b0948910ef6128c..bdf6f6eff721262d1c98a9dc48adba2b569adba9 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/threads.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/regdef.h>
 #include <asm/page.h>
 #include <asm/mipsregs.h>
         */
        .macro  setup_c0_status set clr
        .set    push
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * For SMTC, we need to set privilege and disable interrupts only for
+        * the current TC, using the TCStatus register.
+        */
+       mfc0    t0, CP0_TCSTATUS
+       /* Fortunately CU 0 is in the same place in both registers */
+       /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
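+       /* 0x08001c00: TMX (bit 27) | TKSU (bits 12..11) | IXMT (bit 10), MT ASE bit positions */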
+       li      t1, ST0_CU0 | 0x08001c00
+       or      t0, t1
+       /* Clear TKSU, leave IXMT */
+       xori    t0, 0x00001800
+       mtc0    t0, CP0_TCSTATUS
+       ehb
+       /* We need to leave the global IE bit set, but clear EXL...*/
+       mfc0    t0, CP0_STATUS
+       or      t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
+       xor     t0, ST0_EXL | ST0_ERL | \clr
+       mtc0    t0, CP0_STATUS
+#else
        mfc0    t0, CP0_STATUS
        or      t0, ST0_CU0|\set|0x1f|\clr
        xor     t0, 0x1f|\clr
        mtc0    t0, CP0_STATUS
        .set    noreorder
        sll     zero,3                          # ehb
+#endif
        .set    pop
        .endm
 
@@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp)                       # kernel entry point
 
        ARC64_TWIDDLE_PC
 
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
+        * We still need to enable interrupts globally in Status,
+        * and clear EXL/ERL.
+        *
+        * TCContext is used to track interrupt levels under
+        * service in SMTC kernel. Clear for boot TC before
+        * allowing any interrupts.
+        */
+       mtc0    zero, CP0_TCCONTEXT
+
+       mfc0    t0, CP0_STATUS
+       ori     t0, t0, 0xff1f
+       xori    t0, t0, 0x001e
+       mtc0    t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
+
        PTR_LA          t0, __bss_start         # clear .bss
        LONG_S          zero, (t0)
        PTR_LA          t1, __bss_stop - LONGSIZE
@@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp)                       # kernel entry point
  * function after setting up the stack and gp registers.
  */
 NESTED(smp_bootstrap, 16, sp)
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * Read-modify-writes of Status must be atomic, and this
+        * is one case where CLI is invoked without EXL being
+        * necessarily set. The CLI and setup_c0_status will
+        * in fact be redundant for all but the first TC of
+        * each VPE being booted.
+        */
+       DMT     10      # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
+       jal     mips_ihb
+#endif /* CONFIG_MIPS_MT_SMTC */
        setup_c0_status_sec
        smp_slave_setup
+#ifdef CONFIG_MIPS_MT_SMTC
+       andi    t2, t2, VPECONTROL_TE
+       beqz    t2, 2f
+       EMT             # emt
+2:
+#endif /* CONFIG_MIPS_MT_SMTC */
        j       start_secondary
        END(smp_bootstrap)
 #endif /* CONFIG_SMP */
index b974ac9057f616e5f3eb07882da80c3712762842..2125ba5f1d9b20b2d2e0fe282e6513b399546478 100644 (file)
@@ -187,6 +187,10 @@ handle_real_irq:
                outb(cached_21,0x21);
                outb(0x60+irq,0x20);    /* 'Specific EOI' to master */
        }
+#ifdef CONFIG_MIPS_MT_SMTC
+        if (irq_hwmask[irq] & ST0_IM)
+               set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
        spin_unlock_irqrestore(&i8259A_lock, flags);
        return;
 
index 3f653c7cfbf3d58eba3dbb300d96a13152a10aea..97ebdc754b9e6e8a59a1edf02611819872e606fb 100644 (file)
@@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
        mask_msc_irq(irq);
        if (!cpu_has_veic)
                MSCIC_WRITE(MSC01_IC_EOI, 0);
+#ifdef CONFIG_MIPS_MT_SMTC
+       /* This actually needs to be a call into platform code */
+       if (irq_hwmask[irq] & ST0_IM)
+               set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
                MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
                MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
        }
+#ifdef CONFIG_MIPS_MT_SMTC
+       if (irq_hwmask[irq] & ST0_IM)
+               set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
index e0efc4f2f93e23c05d9d6500553309fbfbf776f6..3dce742e716fd3df1b36920dc3606d2e91fb056b 100644 (file)
@@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)
 
 atomic_t irq_err_count;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
+ */
+unsigned long irq_hwmask[NR_IRQS];
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 #undef do_IRQ
 
 /*
@@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
        irq_enter();
 
+       __DO_IRQ_SMTC_HOOK();
        __do_IRQ(irq, regs);
 
        irq_exit();
@@ -129,6 +139,9 @@ void __init init_IRQ(void)
                irq_desc[i].depth   = 1;
                irq_desc[i].handler = &no_irq_type;
                spin_lock_init(&irq_desc[i].lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+               irq_hwmask[i] = 0;
+#endif /* CONFIG_MIPS_MT_SMTC */
        }
 
        arch_init_irq();
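Note: irq_hwmask[] is filled in by setup_irq_smtc(), added in the new smtc.c (not part of this hunk), and consumed both by __DO_IRQ_SMTC_HOOK() on entry to do_IRQ() and by the ack paths in i8259.c/irq-msc01.c above. A hypothetical registration could look like this (sketch only; the names cpu_ipi_irq and irq_ipi are made up here, and the exact setup_irq_smtc() signature lives in the new smtc.h):

    /* Hypothetical: tie a Status.IM bit to an IRQ line so SMTC can mask and re-arm it */
    extern int setup_irq_smtc(unsigned int irq, struct irqaction *new,
                              unsigned long hwmask);

    setup_irq_smtc(cpu_ipi_irq, &irq_ipi, 0x100 << cpu_ipi_irq);
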
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
new file mode 100644 (file)
index 0000000..02237a6
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/mipsmtregs.h>
+#include <asm/r4kcache.h>
+#include <asm/cacheflush.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+
+cpumask_t mt_fpu_cpumask;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+
+unsigned long mt_fpemul_threshold = 0;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical with the sys_sched_setaffinity()
+ * and sys_sched_getaffinity() system calls, and should be
+ * updated when kernel/sched.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * cloned here.
+ */
+static inline task_t *find_process_by_pid(pid_t pid)
+{
+       return pid ? find_task_by_pid(pid) : current;
+}
+
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+                                     unsigned long __user *user_mask_ptr)
+{
+       cpumask_t new_mask;
+       cpumask_t effective_mask;
+       int retval;
+       task_t *p;
+
+       if (len < sizeof(new_mask))
+               return -EINVAL;
+
+       if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+               return -EFAULT;
+
+       lock_cpu_hotplug();
+       read_lock(&tasklist_lock);
+
+       p = find_process_by_pid(pid);
+       if (!p) {
+               read_unlock(&tasklist_lock);
+               unlock_cpu_hotplug();
+               return -ESRCH;
+       }
+
+       /*
+        * It is not safe to call set_cpus_allowed with the
+        * tasklist_lock held.  We will bump the task_struct's
+        * usage count and drop tasklist_lock before invoking
+        * set_cpus_allowed.
+        */
+       get_task_struct(p);
+
+       retval = -EPERM;
+       if ((current->euid != p->euid) && (current->euid != p->uid) &&
+                       !capable(CAP_SYS_NICE)) {
+               read_unlock(&tasklist_lock);
+               goto out_unlock;
+       }
+
+       /* Record new user-specified CPU set for future reference */
+       p->thread.user_cpus_allowed = new_mask;
+
+       /* Unlock the task list */
+       read_unlock(&tasklist_lock);
+
+       /* Compute new global allowed CPU set if necessary */
+       if( (p->thread.mflags & MF_FPUBOUND)
+       && cpus_intersects(new_mask, mt_fpu_cpumask)) {
+               cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
+               retval = set_cpus_allowed(p, effective_mask);
+       } else {
+               p->thread.mflags &= ~MF_FPUBOUND;
+               retval = set_cpus_allowed(p, new_mask);
+       }
+
+
+out_unlock:
+       put_task_struct(p);
+       unlock_cpu_hotplug();
+       return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+                                     unsigned long __user *user_mask_ptr)
+{
+       unsigned int real_len;
+       cpumask_t mask;
+       int retval;
+       task_t *p;
+
+       real_len = sizeof(mask);
+       if (len < real_len)
+               return -EINVAL;
+
+       lock_cpu_hotplug();
+       read_lock(&tasklist_lock);
+
+       retval = -ESRCH;
+       p = find_process_by_pid(pid);
+       if (!p)
+               goto out_unlock;
+
+       retval = 0;
+
+       cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+
+out_unlock:
+       read_unlock(&tasklist_lock);
+       unlock_cpu_hotplug();
+       if (retval)
+               return retval;
+       if (copy_to_user(user_mask_ptr, &mask, real_len))
+               return -EFAULT;
+       return real_len;
+}
+
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+/*
+ * Dump new MIPS MT state for the core. Does not leave TCs halted.
+ * Takes an argument which is taken to be a pre-call MVPControl value.
+ */
+
+void mips_mt_regdump(unsigned long mvpctl)
+{
+       unsigned long flags;
+       unsigned long vpflags;
+       unsigned long mvpconf0;
+       int nvpe;
+       int ntc;
+       int i;
+       int tc;
+       unsigned long haltval;
+       unsigned long tcstatval;
+#ifdef CONFIG_MIPS_MT_SMTC
+       void smtc_soft_dump(void);
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+       local_irq_save(flags);
+       vpflags = dvpe();
+       printk("=== MIPS MT State Dump ===\n");
+       printk("-- Global State --\n");
+       printk("   MVPControl Passed: %08lx\n", mvpctl);
+       printk("   MVPControl Read: %08lx\n", vpflags);
+       printk("   MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
+       nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+       ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+       printk("-- per-VPE State --\n");
+       for(i = 0; i < nvpe; i++) {
+           for(tc = 0; tc < ntc; tc++) {
+                       settc(tc);
+               if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+                   printk("  VPE %d\n", i);
+                   printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
+                   printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
+                   printk("   VPE%d.Status : %08lx\n",
+                               i, read_vpe_c0_status());
+                   printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
+                   printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
+                   printk("   VPE%d.Config7 : %08lx\n",
+                               i, read_vpe_c0_config7());
+                   break; /* Next VPE */
+               }
+           }
+       }
+       printk("-- per-TC State --\n");
+       for(tc = 0; tc < ntc; tc++) {
+               settc(tc);
+               if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+                       /* Are we dumping ourself?  */
+                       haltval = 0; /* Then we're not halted, and mustn't be */
+                       tcstatval = flags; /* And pre-dump TCStatus is flags */
+                       printk("  TC %d (current TC with VPE EPC above)\n", tc);
+               } else {
+                       haltval = read_tc_c0_tchalt();
+                       write_tc_c0_tchalt(1);
+                       tcstatval = read_tc_c0_tcstatus();
+                       printk("  TC %d\n", tc);
+               }
+               printk("   TCStatus : %08lx\n", tcstatval);
+               printk("   TCBind : %08lx\n", read_tc_c0_tcbind());
+               printk("   TCRestart : %08lx\n", read_tc_c0_tcrestart());
+               printk("   TCHalt : %08lx\n", haltval);
+               printk("   TCContext : %08lx\n", read_tc_c0_tccontext());
+               if (!haltval)
+                       write_tc_c0_tchalt(0);
+       }
+#ifdef CONFIG_MIPS_MT_SMTC
+       smtc_soft_dump();
+#endif /* CONFIG_MIPS_MT_SMTC */
+       printk("===========================\n");
+       evpe(vpflags);
+       local_irq_restore(flags);
+}
+
+static int mt_opt_norps = 0;
+static int mt_opt_rpsctl = -1;
+static int mt_opt_nblsu = -1;
+static int mt_opt_forceconfig7 = 0;
+static int mt_opt_config7 = -1;
+
+static int __init rps_disable(char *s)
+{
+       mt_opt_norps = 1;
+       return 1;
+}
+__setup("norps", rps_disable);
+
+static int __init rpsctl_set(char *str)
+{
+       get_option(&str, &mt_opt_rpsctl);
+       return 1;
+}
+__setup("rpsctl=", rpsctl_set);
+
+static int __init nblsu_set(char *str)
+{
+       get_option(&str, &mt_opt_nblsu);
+       return 1;
+}
+__setup("nblsu=", nblsu_set);
+
+static int __init config7_set(char *str)
+{
+       get_option(&str, &mt_opt_config7);
+       mt_opt_forceconfig7 = 1;
+       return 1;
+}
+__setup("config7=", config7_set);
+
+/* Experimental cache flush control parameters that should go away some day */
+int mt_protiflush = 0;
+int mt_protdflush = 0;
+int mt_n_iflushes = 1;
+int mt_n_dflushes = 1;
+
+static int __init set_protiflush(char *s)
+{
+       mt_protiflush = 1;
+       return 1;
+}
+__setup("protiflush", set_protiflush);
+
+static int __init set_protdflush(char *s)
+{
+       mt_protdflush = 1;
+       return 1;
+}
+__setup("protdflush", set_protdflush);
+
+static int __init niflush(char *s)
+{
+       get_option(&s, &mt_n_iflushes);
+       return 1;
+}
+__setup("niflush=", niflush);
+
+static int __init ndflush(char *s)
+{
+       get_option(&s, &mt_n_dflushes);
+       return 1;
+}
+__setup("ndflush=", ndflush);
+#ifdef CONFIG_MIPS_MT_FPAFF
+static int fpaff_threshold = -1;
+
+static int __init fpaff_thresh(char *str)
+{
+       get_option(&str, &fpaff_threshold);
+       return 1;
+}
+
+__setup("fpaff=", fpaff_thresh);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+static unsigned int itc_base = 0;
+
+static int __init set_itc_base(char *str)
+{
+       get_option(&str, &itc_base);
+       return 1;
+}
+
+__setup("itcbase=", set_itc_base);
+
+void mips_mt_set_cpuoptions(void)
+{
+       unsigned int oconfig7 = read_c0_config7();
+       unsigned int nconfig7 = oconfig7;
+
+       if (mt_opt_norps) {
+               printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
+       }
+       if (mt_opt_rpsctl >= 0) {
+               printk("34K return prediction stack override set to %d.\n",
+                       mt_opt_rpsctl);
+               if (mt_opt_rpsctl)
+                       nconfig7 |= (1 << 2);
+               else
+                       nconfig7 &= ~(1 << 2);
+       }
+       if (mt_opt_nblsu >= 0) {
+               printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
+               if (mt_opt_nblsu)
+                       nconfig7 |= (1 << 5);
+               else
+                       nconfig7 &= ~(1 << 5);
+       }
+       if (mt_opt_forceconfig7) {
+               printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
+               nconfig7 = mt_opt_config7;
+       }
+       if (oconfig7 != nconfig7) {
+               __asm__ __volatile("sync");
+               write_c0_config7(nconfig7);
+               ehb ();
+               printk("Config7: 0x%08x\n", read_c0_config7());
+       }
+
+       /* Report Cache management debug options */
+       if (mt_protiflush)
+               printk("I-cache flushes single-threaded\n");
+       if (mt_protdflush)
+               printk("D-cache flushes single-threaded\n");
+       if (mt_n_iflushes != 1)
+               printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
+       if (mt_n_dflushes != 1)
+               printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+       /* FPU Use Factor empirically derived from experiments on 34K */
+#define FPUSEFACTOR 333
+
+       if (fpaff_threshold >= 0) {
+               mt_fpemul_threshold = fpaff_threshold;
+       } else {
+               mt_fpemul_threshold =
+                       (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+       }
+       printk("FPU Affinity set after %ld emulations\n",
+                       mt_fpemul_threshold);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+       if (itc_base != 0) {
+               /*
+                * Configure ITC mapping.  This code is very
+                * specific to the 34K core family, which uses
+                * a special mode bit ("ITC") in the ErrCtl
+                * register to enable access to ITC control
+                * registers via cache "tag" operations.
+                */
+               unsigned long ectlval;
+               unsigned long itcblkgrn;
+
+               /* ErrCtl register is known as "ecc" to Linux */
+               ectlval = read_c0_ecc();
+               write_c0_ecc(ectlval | (0x1 << 26));
+               ehb();
+#define INDEX_0 (0x80000000)
+#define INDEX_8 (0x80000008)
+               /* Read "cache tag" for Dcache pseudo-index 8 */
+               cache_op(Index_Load_Tag_D, INDEX_8);
+               ehb();
+               itcblkgrn = read_c0_dtaglo();
+               itcblkgrn &= 0xfffe0000;
+               /* Set for 128 byte pitch of ITC cells */
+               itcblkgrn |= 0x00000c00;
+               /* Stage in Tag register */
+               write_c0_dtaglo(itcblkgrn);
+               ehb();
+               /* Write out to ITU with CACHE op */
+               cache_op(Index_Store_Tag_D, INDEX_8);
+               /* Now set base address, and turn ITC on with 0x1 bit */
+               write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
+               ehb();
+               /* Write out to ITU with CACHE op */
+               cache_op(Index_Store_Tag_D, INDEX_0);
+               write_c0_ecc(ectlval);
+               ehb();
+               printk("Mapped %ld ITC cells starting at 0x%08x\n",
+                       ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
+       }
+}
+
+/*
+ * Function to protect cache flushes from concurrent execution
+ * depends on MP software model chosen.
+ */
+
+void mt_cflush_lockdown(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       void smtc_cflush_lockdown(void);
+
+       smtc_cflush_lockdown();
+#endif /* CONFIG_MIPS_MT_SMTC */
+       /* FILL IN VSMP and AP/SP VERSIONS HERE */
+}
+
+void mt_cflush_release(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       void smtc_cflush_release(void);
+
+       smtc_cflush_release();
+#endif /* CONFIG_MIPS_MT_SMTC */
+       /* FILL IN VSMP and AP/SP VERSIONS HERE */
+}
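Note: the boot-time parameters registered in mips-mt.c above (norps, rpsctl=, nblsu=, config7=, protiflush, protdflush, niflush=, ndflush=, fpaff=, itcbase=) can be combined on the kernel command line; the values below are made up for illustration:

    rpsctl=0 nblsu=1 fpaff=300 itcbase=0x1c000000

mips_mt_set_cpuoptions() then reports each override as it applies it.
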
index c66db5e5ab624f9c2b0d3e7256539c6f0932cb97..8b393df460a28dc2fb419a16700bdf28e4837241 100644 (file)
 #include <asm/elf.h>
 #include <asm/isadep.h>
 #include <asm/inst.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+extern void smtc_idle_loop_hook(void);
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 /*
  * The idle thread. There's no useful work to be done, so just try to conserve
@@ -51,9 +55,13 @@ ATTRIB_NORET void cpu_idle(void)
 {
        /* endless idle loop with no priority at all */
        while (1) {
-               while (!need_resched())
+               while (!need_resched()) {
+#ifdef CONFIG_MIPS_MT_SMTC
+                       smtc_idle_loop_hook();
+#endif /* CONFIG_MIPS_MT_SMTC */
                        if (cpu_wait)
                                (*cpu_wait)();
+               }
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
index f838b36cc765bcbc9ad544701aec14e2ff29c278..f3106d0771b0707ba21a767c32ed852530bc78d9 100644 (file)
@@ -248,10 +248,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        break;
                case FPC_EIR: { /* implementation / version register */
                        unsigned int flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+                       unsigned int irqflags;
+                       unsigned int mtflags;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
                        if (!cpu_has_fpu)
                                break;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+                       /* Read-modify-write of Status must be atomic */
+                       local_irq_save(irqflags);
+                       mtflags = dmt();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
                        preempt_disable();
                        if (cpu_has_mipsmt) {
                                unsigned int vpflags = dvpe();
@@ -266,6 +276,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
                                write_c0_status(flags);
                        }
+#ifdef CONFIG_MIPS_MT_SMTC
+                       emt(mtflags);
+                       local_irq_restore(irqflags);
+#endif /* CONFIG_MIPS_MT_SMTC */
                        preempt_enable();
                        break;
                }
index 0d5cf97af727e3c39fd6c3e3fa965979c2a517bf..8704dc0496ea7f2427b4fd111dbbb54397955b0f 100644 (file)
@@ -173,12 +173,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
                        break;
                case FPC_EIR: { /* implementation / version register */
                        unsigned int flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+                       unsigned int irqflags;
+                       unsigned int mtflags;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
                        if (!cpu_has_fpu) {
                                tmp = 0;
                                break;
                        }
 
+#ifdef CONFIG_MIPS_MT_SMTC
+                       /* Read-modify-write of Status must be atomic */
+                       local_irq_save(irqflags);
+                       mtflags = dmt();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
                        preempt_disable();
                        if (cpu_has_mipsmt) {
                                unsigned int vpflags = dvpe();
@@ -193,6 +203,10 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
                                __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
                                write_c0_status(flags);
                        }
+#ifdef CONFIG_MIPS_MT_SMTC
+                       emt(mtflags);
+                       local_irq_restore(irqflags);
+#endif /* CONFIG_MIPS_MT_SMTC */
                        preempt_enable();
                        break;
                }
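Note: both ptrace paths wrap the FPU implementation-register read in the same bracketing idiom so that the read-modify-write of Status cannot be interleaved with other TCs on the VPE; in isolation the pattern is (sketch, using the dmt()/emt() helpers referenced in the hunks above):

    local_irq_save(irqflags);      /* no interrupts on this TC                */
    mtflags = dmt();               /* stop the other TCs on this VPE          */
    /* ... read / modify / write CP0 Status ... */
    emt(mtflags);                  /* let the other TCs run again             */
    local_irq_restore(irqflags);
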
index d2afbd19a9c8a0c013b4170008d55a18d701e61c..0b1b54acee9ffeb538a6667da285ee6c82a54777 100644 (file)
 
        PTR_ADDIU       t0, $28, _THREAD_SIZE - 32
        set_saved_sp    t0, t1, t2
-
+#ifdef CONFIG_MIPS_MT_SMTC
+       /* Read-modify-writes of Status must be atomic on a VPE */
+       mfc0    t2, CP0_TCSTATUS
+       ori     t1, t2, TCSTATUS_IXMT
+       mtc0    t1, CP0_TCSTATUS
+       andi    t2, t2, TCSTATUS_IXMT
+       ehb
+       DMT     8                               # dmt   t0
+       move    t1,ra
+       jal     mips_ihb
+       move    ra,t1
+#endif /* CONFIG_MIPS_MT_SMTC */
        mfc0    t1, CP0_STATUS          /* Do we really need this? */
        li      a3, 0xff01
        and     t1, a3
        and     a2, a3
        or      a2, t1
        mtc0    a2, CP0_STATUS
+#ifdef CONFIG_MIPS_MT_SMTC
+       ehb
+       andi    t0, t0, VPECONTROL_TE
+       beqz    t0, 1f
+       emt
+1:
+       mfc0    t1, CP0_TCSTATUS
+       xori    t1, t1, TCSTATUS_IXMT
+       or      t1, t1, t2
+       mtc0    t1, CP0_TCSTATUS
+       ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
        move    v0, a0
        jr      ra
        END(resume)
@@ -131,10 +154,19 @@ LEAF(_restore_fp)
 #define FPU_DEFAULT  0x00000000
 
 LEAF(_init_fpu)
+#ifdef CONFIG_MIPS_MT_SMTC
+       /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
+       mfc0    t0, CP0_TCSTATUS
+       /* Bit position is the same for Status, TCStatus */
+       li      t1, ST0_CU1
+       or      t0, t1
+       mtc0    t0, CP0_TCSTATUS
+#else /* Normal MIPS CU1 enable */
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU1
        or      t0, t1
        mtc0    t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
        fpu_enable_hazard
 
        li      t1, FPU_DEFAULT
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
new file mode 100644 (file)
index 0000000..19b8e4b
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
+ *    Elizabeth Clarke (beth@mips.com)
+ *    Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/mips-boards/maltaint.h>  /* This is f*cking wrong */
+
+#define MIPS_CPU_IPI_RESCHED_IRQ 0
+#define MIPS_CPU_IPI_CALL_IRQ 1
+
+static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
+
+#if 0
+static void dump_mtregisters(int vpe, int tc)
+{
+       printk("vpe %d tc %d\n", vpe, tc);
+
+       settc(tc);
+
+       printk("  c0 status  0x%lx\n", read_vpe_c0_status());
+       printk("  vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
+       printk("  vpeconf0    0x%lx\n", read_vpe_c0_vpeconf0());
+       printk("  tcstatus 0x%lx\n", read_tc_c0_tcstatus());
+       printk("  tcrestart 0x%lx\n", read_tc_c0_tcrestart());
+       printk("  tcbind 0x%lx\n", read_tc_c0_tcbind());
+       printk("  tchalt 0x%lx\n", read_tc_c0_tchalt());
+}
+#endif
+
+void __init sanitize_tlb_entries(void)
+{
+       int i, tlbsiz;
+       unsigned long mvpconf0, ncpu;
+
+       if (!cpu_has_mipsmt)
+               return;
+
+       /* Enable VPC */
+       set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+       back_to_back_c0_hazard();
+
+       /* Disable TLB sharing */
+       clear_c0_mvpcontrol(MVPCONTROL_STLB);
+
+       mvpconf0 = read_c0_mvpconf0();
+
+       printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
+                  (mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
+                          (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
+
+       tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
+       ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+       printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
+
+       if (tlbsiz > 0) {
+               /* share them out across the vpe's */
+               tlbsiz /= ncpu;
+
+               printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
+
+               for (i = 0; i < ncpu; i++) {
+                       settc(i);
+
+                       if (i == 0)
+                               write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
+                       else
+                               write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
+                                                  (tlbsiz << 25));
+               }
+       }
+
+       clear_c0_mvpcontrol(MVPCONTROL_VPC);
+}
+
+static void ipi_resched_dispatch (struct pt_regs *regs)
+{
+       do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
+}
+
+static void ipi_call_dispatch (struct pt_regs *regs)
+{
+       do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
+}
+
+irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+       return IRQ_HANDLED;
+}
+
+irqreturn_t ipi_call_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+       smp_call_function_interrupt();
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+       .handler        = ipi_resched_interrupt,
+       .flags          = SA_INTERRUPT,
+       .name           = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+       .handler        = ipi_call_interrupt,
+       .flags          = SA_INTERRUPT,
+       .name           = "IPI_call"
+};
+
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPUs are in a sensible state before we boot any of the
+ * secondaries
+ */
+void plat_smp_setup(void)
+{
+       unsigned long val;
+       int i, num;
+
+       if (!cpu_has_mipsmt)
+               return;
+
+       /* disable MT so we can configure */
+       dvpe();
+       dmt();
+
+       mips_mt_set_cpuoptions();
+
+       /* Put MVPE's into 'configuration state' */
+       set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+       val = read_c0_mvpconf0();
+
+       /* we'll always have more TC's than VPE's, so loop setting everything
+          to a sensible state */
+       for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {
+               settc(i);
+
+               /* VPE's */
+               if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
+
+                       /* deactivate all but vpe0 */
+                       if (i != 0) {
+                               unsigned long tmp = read_vpe_c0_vpeconf0();
+
+                               tmp &= ~VPECONF0_VPA;
+
+                               /* master VPE */
+                               tmp |= VPECONF0_MVP;
+                               write_vpe_c0_vpeconf0(tmp);
+
+                               /* Record this as available CPU */
+                               cpu_set(i, phys_cpu_present_map);
+                               __cpu_number_map[i]     = ++num;
+                               __cpu_logical_map[num]  = i;
+                       }
+
+                       /* disable multi-threading with TC's */
+                       write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+                       if (i != 0) {
+                               write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+
+                               /* set config to be the same as vpe0, particularly kseg0 coherency alg */
+                               write_vpe_c0_config( read_c0_config());
+
+                               /* make sure there are no software interrupts pending */
+                               write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
+
+                               /* Propagate Config7 */
+                               write_vpe_c0_config7(read_c0_config7());
+                       }
+
+               }
+
+               /* TC's */
+
+               if (i != 0) {
+                       unsigned long tmp;
+
+                       /* bind a TC to each VPE; may as well put all excess TCs
+                          on the last VPE */
+                       if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
+                               write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
+                       else {
+                               write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
+
+                               /* and set XTC */
+                               write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
+                       }
+
+                       tmp = read_tc_c0_tcstatus();
+
+                       /* mark not allocated and not dynamically allocatable */
+                       tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+                       tmp |= TCSTATUS_IXMT;           /* interrupt exempt */
+                       write_tc_c0_tcstatus(tmp);
+
+                       write_tc_c0_tchalt(TCHALT_H);
+               }
+       }
+
+       /* Release config state */
+       clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+       /* We'll wait until starting the secondaries before starting MVPE */
+
+       printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+}
+
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
+       /* set up ipi interrupts */
+       if (cpu_has_vint) {
+               set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+               set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+       }
+
+       cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+       cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
+
+       setup_irq(cpu_ipi_resched_irq, &irq_resched);
+       setup_irq(cpu_ipi_call_irq, &irq_call);
+
+       /* need to mark IPI's as IRQ_PER_CPU */
+       irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
+       irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info the gp
+ * assumes a 1:1 mapping of TC => VPE
+ */
+void prom_boot_secondary(int cpu, struct task_struct *idle)
+{
+       struct thread_info *gp = task_thread_info(idle);
+       dvpe();
+       set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+       settc(cpu);
+
+       /* restart */
+       write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
+
+       /* enable the tc this vpe/cpu will be running */
+       write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
+
+       write_tc_c0_tchalt(0);
+
+       /* enable the VPE */
+       write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+       /* stack pointer */
+       write_tc_gpr_sp( __KSTK_TOS(idle));
+
+       /* global pointer */
+       write_tc_gpr_gp((unsigned long)gp);
+
+       flush_icache_range((unsigned long)gp,
+                          (unsigned long)(gp + sizeof(struct thread_info)));
+
+       /* finally out of configuration and into chaos */
+       clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+       evpe(EVPE_ENABLE);
+}
+
+void prom_init_secondary(void)
+{
+       write_c0_status((read_c0_status() & ~ST0_IM ) |
+                       (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
+}
+
+void prom_smp_finish(void)
+{
+       write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
+
+       local_irq_enable();
+}
+
+void prom_cpus_done(void)
+{
+}
+
+void core_send_ipi(int cpu, unsigned int action)
+{
+       int i;
+       unsigned long flags;
+       int vpflags;
+
+       local_irq_save (flags);
+
+       vpflags = dvpe();       /* can't access the other CPU's registers whilst MVPE is enabled */
+
+       switch (action) {
+       case SMP_CALL_FUNCTION:
+               i = C_SW1;
+               break;
+
+       case SMP_RESCHEDULE_YOURSELF:
+       default:
+               i = C_SW0;
+               break;
+       }
+
+       /* 1:1 mapping of vpe and tc... */
+       settc(cpu);
+       write_vpe_c0_cause(read_vpe_c0_cause() | i);
+       evpe(vpflags);
+
+       local_irq_restore(flags);
+}
index 72a287aa937ec1db02fd240b812640c33a1deb84..d42f358754ad829129663f01350d7c0757f8f078 100644 (file)
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 cpumask_t phys_cpu_present_map;                /* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;     /* Bitmask of started secondaries */
 cpumask_t cpu_online_map;              /* Bitmask of currently online CPUs */
@@ -85,6 +89,10 @@ asmlinkage void start_secondary(void)
 {
        unsigned int cpu;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+       /* Only do cpu_probe for first TC of CPU */
+       if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
+#endif /* CONFIG_MIPS_MT_SMTC */
        cpu_probe();
        cpu_report();
        per_cpu_trap_init();
@@ -179,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
+       call_data = NULL;
        spin_unlock(&smp_call_lock);
 
        return 0;
 }
 
+
 void smp_call_function_interrupt(void)
 {
        void (*func) (void *info) = call_data->func;
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp_mt.c
deleted file mode 100644 (file)
index 993b8bf..0000000
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
- *
- *  Elizabeth Clarke (beth@mips.com)
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-#include <linux/compiler.h>
-
-#include <asm/atomic.h>
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/smp.h>
-#include <asm/time.h>
-#include <asm/mipsregs.h>
-#include <asm/mipsmtregs.h>
-#include <asm/cacheflush.h>
-#include <asm/mips-boards/maltaint.h>
-
-#define MIPS_CPU_IPI_RESCHED_IRQ 0
-#define MIPS_CPU_IPI_CALL_IRQ 1
-
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-#if 0
-static void dump_mtregisters(int vpe, int tc)
-{
-       printk("vpe %d tc %d\n", vpe, tc);
-
-       settc(tc);
-
-       printk("  c0 status  0x%lx\n", read_vpe_c0_status());
-       printk("  vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
-       printk("  vpeconf0    0x%lx\n", read_vpe_c0_vpeconf0());
-       printk("  tcstatus 0x%lx\n", read_tc_c0_tcstatus());
-       printk("  tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-       printk("  tcbind 0x%lx\n", read_tc_c0_tcbind());
-       printk("  tchalt 0x%lx\n", read_tc_c0_tchalt());
-}
-#endif
-
-void __init sanitize_tlb_entries(void)
-{
-       int i, tlbsiz;
-       unsigned long mvpconf0, ncpu;
-
-       if (!cpu_has_mipsmt)
-               return;
-
-       set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-       back_to_back_c0_hazard();
-
-       /* Disable TLB sharing */
-       clear_c0_mvpcontrol(MVPCONTROL_STLB);
-
-       mvpconf0 = read_c0_mvpconf0();
-
-       printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
-                  (mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
-                          (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
-
-       tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
-       ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
-       printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
-
-       if (tlbsiz > 0) {
-               /* share them out across the vpe's */
-               tlbsiz /= ncpu;
-
-               printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
-
-               for (i = 0; i < ncpu; i++) {
-                       settc(i);
-
-                       if (i == 0)
-                               write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
-                       else
-                               write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
-                                                  (tlbsiz << 25));
-               }
-       }
-
-       clear_c0_mvpcontrol(MVPCONTROL_VPC);
-}
-
-static void ipi_resched_dispatch (struct pt_regs *regs)
-{
-       do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs);
-}
-
-static void ipi_call_dispatch (struct pt_regs *regs)
-{
-       do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs);
-}
-
-irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-       return IRQ_HANDLED;
-}
-
-irqreturn_t ipi_call_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-       smp_call_function_interrupt();
-
-       return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
-       .handler        = ipi_resched_interrupt,
-       .flags          = SA_INTERRUPT,
-       .name           = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
-       .handler        = ipi_call_interrupt,
-       .flags          = SA_INTERRUPT,
-       .name           = "IPI_call"
-};
-
-/*
- * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
- */
-void plat_smp_setup(void)
-{
-       unsigned long val;
-       int i, num;
-
-       if (!cpu_has_mipsmt)
-               return;
-
-       /* disable MT so we can configure */
-       dvpe();
-       dmt();
-
-       /* Put MVPE's into 'configuration state' */
-       set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-       val = read_c0_mvpconf0();
-
-       /* we'll always have more TC's than VPE's, so loop setting everything
-          to a sensible state */
-       for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {
-               settc(i);
-
-               /* VPE's */
-               if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
-
-                       /* deactivate all but vpe0 */
-                       if (i != 0) {
-                               unsigned long tmp = read_vpe_c0_vpeconf0();
-
-                               tmp &= ~VPECONF0_VPA;
-
-                               /* master VPE */
-                               tmp |= VPECONF0_MVP;
-                               write_vpe_c0_vpeconf0(tmp);
-
-                               /* Record this as available CPU */
-                               cpu_set(i, phys_cpu_present_map);
-                               __cpu_number_map[i]     = ++num;
-                               __cpu_logical_map[num]  = i;
-                       }
-
-                       /* disable multi-threading with TC's */
-                       write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
-
-                       if (i != 0) {
-                               write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
-                               write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
-
-                               /* set config to be the same as vpe0, particularly kseg0 coherency alg */
-                               write_vpe_c0_config( read_c0_config());
-
-                               /* Propagate Config7 */
-                               write_vpe_c0_config7(read_c0_config7());
-                       }
-
-               }
-
-               /* TC's */
-
-               if (i != 0) {
-                       unsigned long tmp;
-
-                       /* bind a TC to each VPE, May as well put all excess TC's
-                          on the last VPE */
-                       if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
-                               write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
-                       else {
-                               write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
-
-                               /* and set XTC */
-                               write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
-                       }
-
-                       tmp = read_tc_c0_tcstatus();
-
-                       /* mark not allocated and not dynamically allocatable */
-                       tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-                       tmp |= TCSTATUS_IXMT;           /* interrupt exempt */
-                       write_tc_c0_tcstatus(tmp);
-
-                       write_tc_c0_tchalt(TCHALT_H);
-               }
-       }
-
-       /* Release config state */
-       clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-       /* We'll wait until starting the secondaries before starting MVPE */
-
-       printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
-
-       /* set up ipi interrupts */
-       if (cpu_has_vint) {
-               set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-               set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-       }
-}
-
-void __init plat_prepare_cpus(unsigned int max_cpus)
-{
-       cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-       cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
-       setup_irq(cpu_ipi_resched_irq, &irq_resched);
-       setup_irq(cpu_ipi_call_irq, &irq_call);
-
-       /* need to mark IPI's as IRQ_PER_CPU */
-       irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
-       irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
-}
-
-/*
- * Setup the PC, SP, and GP of a secondary processor and start it
- * running!
- * smp_bootstrap is the place to resume from
- * __KSTK_TOS(idle) is apparently the stack pointer
- * (unsigned long)idle->thread_info the gp
- * assumes a 1:1 mapping of TC => VPE
- */
-void prom_boot_secondary(int cpu, struct task_struct *idle)
-{
-       struct thread_info *gp = task_thread_info(idle);
-       dvpe();
-       set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-       settc(cpu);
-
-       /* restart */
-       write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
-
-       /* enable the tc this vpe/cpu will be running */
-       write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
-
-       write_tc_c0_tchalt(0);
-
-       /* enable the VPE */
-       write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
-
-       /* stack pointer */
-       write_tc_gpr_sp( __KSTK_TOS(idle));
-
-       /* global pointer */
-       write_tc_gpr_gp((unsigned long)gp);
-
-       flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
-
-       /* finally out of configuration and into chaos */
-       clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-       evpe(EVPE_ENABLE);
-}
-
-void prom_init_secondary(void)
-{
-       write_c0_status((read_c0_status() & ~ST0_IM ) |
-                       (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
-}
-
-void prom_smp_finish(void)
-{
-       write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
-
-       local_irq_enable();
-}
-
-void prom_cpus_done(void)
-{
-}
-
-void core_send_ipi(int cpu, unsigned int action)
-{
-       int i;
-       unsigned long flags;
-       int vpflags;
-
-       local_irq_save (flags);
-
-       vpflags = dvpe();       /* cant access the other CPU's registers whilst MVPE enabled */
-
-       switch (action) {
-       case SMP_CALL_FUNCTION:
-               i = C_SW1;
-               break;
-
-       case SMP_RESCHEDULE_YOURSELF:
-       default:
-               i = C_SW0;
-               break;
-       }
-
-       /* 1:1 mapping of vpe and tc... */
-       settc(cpu);
-       write_vpe_c0_cause(read_vpe_c0_cause() | i);
-       evpe(vpflags);
-
-       local_irq_restore(flags);
-}
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
new file mode 100644 (file)
index 0000000..c9d6519
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Assembly Language Functions for MIPS MT SMTC support
+ */
+
+/*
+ * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ */
+
+#include <asm/regdef.h>
+#include <asm/asmmacro.h>
+#include <asm/stackframe.h>
+
+/*
+ * "Software Interrupt" linkage.
+ *
+ * This is invoked when an "Interrupt" is sent from one TC to another,
+ * where the TC to be interrupted is halted, has its Restart address
+ * and Status values saved by the "remote control" thread, then modified
+ * to cause execution to begin here, in kernel mode. This code then
+ * disguises the TC state as that of an exception and transfers
+ * control to the general exception or vectored interrupt handler.
+ */
+       .set noreorder
+
+/*
+The __smtc_ipi_vector would use k0 and k1 as temporaries and
+1) Set EXL (this is per-VPE, so this can't be done by proxy!)
+2) Restore the K/CU and IXMT bits to the pre "exception" state
+   (EXL means no interrupts and access to the kernel map).
+3) Set EPC to be the saved value of TCRestart.
+4) Jump to the exception handler entry point passed by the sender.
+
+CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
+*/
+
+/*
+ * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
+ * state of pre-halt thread, then save everything and call
+ * through some function pointer to imaginary_exception, which
+ * will parse a register value or memory message queue to
+ * deliver things like interprocessor interrupts. On return
+ * from that function, jump to the global ret_from_irq code
+ * to invoke the scheduler and return as appropriate.
+ */
+
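+/*
+ * Offsets of the pt_regs pad slots used to pass the IPI argument (slot 4)
+ * and the handler to invoke (slot 5) from the sender to this vector.
+ */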
+#define PT_PADSLOT4 (PT_R0-8)
+#define PT_PADSLOT5 (PT_R0-4)
+
+       .text
+       .align 5
+FEXPORT(__smtc_ipi_vector)
+       .set    noat
+       /* Disable thread scheduling to make Status update atomic */
+       DMT     27                                      # dmt   k1
+       ehb
+       /* Set EXL */
+       mfc0    k0,CP0_STATUS
+       ori     k0,k0,ST0_EXL
+       mtc0    k0,CP0_STATUS
+       ehb
+       /* Thread scheduling now inhibited by EXL. Restore TE state. */
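+       /* (the DMT above left the prior VPEControl value in k1) */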
+       andi    k1,k1,VPECONTROL_TE
+       beqz    k1,1f
+       emt
+1:
+       /*
+        * The IPI sender has put some information on the anticipated
+        * kernel stack frame.  If we were in user mode, this will be
+        * built above the saved kernel SP.  If we were already in the
+        * kernel, it will be built above the current CPU SP.
+        *
+        * Were we in kernel mode, as indicated by CU0?
+        */
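+       /* Shift Status left 3 so CU0 (bit 28) lands in the sign bit for bltz */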
+       sll     k1,k0,3
+       .set noreorder
+       bltz    k1,2f
+       move    k1,sp
+       .set reorder
+       /*
+        * If previously in user mode, set CU0 and use kernel stack.
+        */
+       li      k1,ST0_CU0
+       or      k1,k1,k0
+       mtc0    k1,CP0_STATUS
+       ehb
+       get_saved_sp
+       /* Interrupting TC will have pre-set values in slots in the new frame */
+2:     subu    k1,k1,PT_SIZE
+       /* Load TCStatus Value */
+       lw      k0,PT_TCSTATUS(k1)
+       /* Write it to TCStatus to restore CU/KSU/IXMT state */
+       mtc0    k0,$2,1
+       ehb
+       lw      k0,PT_EPC(k1)
+       mtc0    k0,CP0_EPC
+       /* SAVE_ALL will redundantly recompute the SP, but use it for now */
+       SAVE_ALL
+       CLI
+       move    a0,sp
+       /* Function to be invoked passed in stack pad slot 5 */
+       lw      t0,PT_PADSLOT5(sp)
+       /* Argument from sender passed in stack pad slot 4 */
+       lw      a1,PT_PADSLOT4(sp)
+       jalr    t0
+       nop
+       j       ret_from_irq
+       nop
+
+/*
+ * Called from idle loop to provoke processing of queued IPIs
+ * First IPI message in queue passed as argument.
+ */
+
+LEAF(self_ipi)
+       /* Before anything else, block interrupts */
+       mfc0    t0,CP0_TCSTATUS
+       ori     t1,t0,TCSTATUS_IXMT
+       mtc0    t1,CP0_TCSTATUS
+       ehb
+       /* We know we're in kernel mode, so prepare stack frame */
+       subu    t1,sp,PT_SIZE
+       sw      ra,PT_EPC(t1)
+       sw      a0,PT_PADSLOT4(t1)
+       la      t2,ipi_decode
+       sw      t2,PT_PADSLOT5(t1)
+       /* Save pre-disable value of TCStatus */
+       sw      t0,PT_TCSTATUS(t1)
+       j       __smtc_ipi_vector
+       nop
+END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
new file mode 100644 (file)
index 0000000..6f37099
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * /proc hooks for SMTC kernel
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/mipsregs.h>
+#include <asm/cacheflush.h>
+#include <linux/proc_fs.h>
+
+#include <asm/smtc_proc.h>
+
+/*
+ * /proc diagnostic and statistics hooks
+ */
+
+/*
+ * Statistics gathered
+ */
+unsigned long selfipis[NR_CPUS];
+
+struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
+
+static struct proc_dir_entry *smtc_stats;
+
+atomic_t smtc_fpu_recoveries;
+
+static int proc_read_smtc(char *page, char **start, off_t off,
+                          int count, int *eof, void *data)
+{
+       int totalen = 0;
+       int len;
+       int i;
+       extern unsigned long ebase;
+
+       len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status);
+       totalen += len;
+       page += len;
+       len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7());
+       totalen += len;
+       page += len;
+       len = sprintf(page, "EBASE: 0x%08lx\n", ebase);
+       totalen += len;
+       page += len;
+       len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n");
+       totalen += len;
+       page += len;
+       for (i=0; i < NR_CPUS; i++) {
+               len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
+               totalen += len;
+               page += len;
+       }
+       len = sprintf(page, "Self-IPIs by CPU:\n");
+       totalen += len;
+       page += len;
+       for(i = 0; i < NR_CPUS; i++) {
+               len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
+               totalen += len;
+               page += len;
+       }
+       len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
+                     atomic_read(&smtc_fpu_recoveries));
+       totalen += len;
+       page += len;
+
+       return totalen;
+}
+
+void init_smtc_stats(void)
+{
+       int i;
+
+       for (i=0; i<NR_CPUS; i++) {
+               smtc_cpu_stats[i].timerints = 0;
+               smtc_cpu_stats[i].selfipis = 0;
+       }
+
+       atomic_set(&smtc_fpu_recoveries, 0);
+
+       smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
+                                           proc_read_smtc, NULL);
+}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
new file mode 100644 (file)
index 0000000..2e8e52c
--- /dev/null
@@ -0,0 +1,1322 @@
+/* Copyright (C) 2004 Mips Technologies, Inc */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/hazards.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/mipsregs.h>
+#include <asm/cacheflush.h>
+#include <asm/time.h>
+#include <asm/addrspace.h>
+#include <asm/smtc.h>
+#include <asm/smtc_ipi.h>
+#include <asm/smtc_proc.h>
+
+/*
+ * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ */
+
+/*
+ * MIPSCPU_INT_BASE is identically defined in both
+ * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
+ * but as yet there's no properly organized include structure that
+ * will ensure that the right *int.h file will be included for a
+ * given platform build.
+ */
+
+#define MIPSCPU_INT_BASE       16
+
+#define MIPS_CPU_IPI_IRQ       1
+
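+/*
+ * The lock/unlock pairs below expect 'flags' and 'mtflags' locals in the
+ * caller; they combine interrupt disabling with DMT (inhibit other TCs on
+ * this VPE) or DVPE (inhibit all other VPEs on the core).
+ */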
+#define LOCK_MT_PRA() \
+       local_irq_save(flags); \
+       mtflags = dmt()
+
+#define UNLOCK_MT_PRA() \
+       emt(mtflags); \
+       local_irq_restore(flags)
+
+#define LOCK_CORE_PRA() \
+       local_irq_save(flags); \
+       mtflags = dvpe()
+
+#define UNLOCK_CORE_PRA() \
+       evpe(mtflags); \
+       local_irq_restore(flags)
+
+/*
+ * Data structures purely associated with SMTC parallelism
+ */
+
+
+/*
+ * Table for tracking ASIDs whose lifetime is prolonged.
+ */
+
+asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
+
+/*
+ * Clock interrupt "latch" buffers, per "CPU"
+ */
+
+unsigned int ipi_timer_latch[NR_CPUS];
+
+/*
+ * Number of InterProcessor Interrupt (IPI) message buffers to allocate
+ */
+
+#define IPIBUF_PER_CPU 4
+
+struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q freeIPIq;
+
+
+/* Forward declarations */
+
+void ipi_decode(struct pt_regs *, struct smtc_ipi *);
+void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
+void setup_cross_vpe_interrupts(void);
+void init_smtc_stats(void);
+
+/* Global SMTC Status */
+
+unsigned int smtc_status = 0;
+
+/* Boot command line configuration overrides */
+
+static int vpelimit = 0;
+static int tclimit = 0;
+static int ipibuffers = 0;
+static int nostlb = 0;
+static int asidmask = 0;
+unsigned long smtc_asid_mask = 0xff;
+
+static int __init maxvpes(char *str)
+{
+       get_option(&str, &vpelimit);
+       return 1;
+}
+
+static int __init maxtcs(char *str)
+{
+       get_option(&str, &tclimit);
+       return 1;
+}
+
+static int __init ipibufs(char *str)
+{
+       get_option(&str, &ipibuffers);
+       return 1;
+}
+
+static int __init stlb_disable(char *s)
+{
+       nostlb = 1;
+       return 1;
+}
+
+static int __init asidmask_set(char *str)
+{
+       get_option(&str, &asidmask);
+       switch(asidmask) {
+       case 0x1:
+       case 0x3:
+       case 0x7:
+       case 0xf:
+       case 0x1f:
+       case 0x3f:
+       case 0x7f:
+       case 0xff:
+               smtc_asid_mask = (unsigned long)asidmask;
+               break;
+       default:
+               printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
+       }
+       return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+__setup("maxtcs=", maxtcs);
+__setup("ipibufs=", ipibufs);
+__setup("nostlb", stlb_disable);
+__setup("asidmask=", asidmask_set);
+
+/* Enable additional debug checks before going into CPU idle loop */
+#define SMTC_IDLE_HOOK_DEBUG
+
+#ifdef SMTC_IDLE_HOOK_DEBUG
+
+static int hang_trig = 0;
+
+static int __init hangtrig_enable(char *s)
+{
+       hang_trig = 1;
+       return 1;
+}
+
+
+__setup("hangtrig", hangtrig_enable);
+
+#define DEFAULT_BLOCKED_IPI_LIMIT 32
+
+static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
+
+static int __init tintq(char *str)
+{
+       get_option(&str, &timerq_limit);
+       return 1;
+}
+
+__setup("tintq=", tintq);
+
+int imstuckcount[2][8];
+/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
+int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
+int tcnoprog[NR_CPUS];
+static atomic_t idle_hook_initialized = {0};
+static int clock_hang_reported[NR_CPUS];
+
+#endif /* SMTC_IDLE_HOOK_DEBUG */
+
+/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
+
+void __init sanitize_tlb_entries(void)
+{
+       printk("Deprecated sanitize_tlb_entries() invoked\n");
+}
+
+
+/*
+ * Configure shared TLB - VPC configuration bit must be set by caller
+ */
+
+void smtc_configure_tlb(void)
+{
+       int i,tlbsiz,vpes;
+       unsigned long mvpconf0;
+       unsigned long config1val;
+
+       /* Set up ASID preservation table */
+       for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
+           for(i = 0; i < MAX_SMTC_ASIDS; i++) {
+               smtc_live_asid[vpes][i] = 0;
+           }
+       }
+       mvpconf0 = read_c0_mvpconf0();
+
+       if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
+                       >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
+           /* If we have multiple VPEs, try to share the TLB */
+           if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
+               /*
+                * If TLB sizing is programmable, shared TLB
+                * size is the total available complement.
+                * Otherwise, we have to take the sum of all
+                * static VPE TLB entries.
+                */
+               if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
+                               >> MVPCONF0_PTLBE_SHIFT)) == 0) {
+                   /*
+                    * If there's more than one VPE, there had better
+                    * be more than one TC, because we need one to bind
+                    * to each VPE in turn to be able to read
+                    * its configuration state!
+                    */
+                   settc(1);
+                   /* Stop the TC from doing anything foolish */
+                   write_tc_c0_tchalt(TCHALT_H);
+                   mips_ihb();
+                   /* No need to un-Halt - that happens later anyway */
+                   for (i=0; i < vpes; i++) {
+                       write_tc_c0_tcbind(i);
+                       /*
+                        * To be 100% sure we're really getting the right
+                        * information, we exit the configuration state
+                        * and do an IHB after each rebinding.
+                        */
+                       write_c0_mvpcontrol(
+                               read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
+                       mips_ihb();
+                       /*
+                        * Only count if the MMU Type indicated is TLB
+                        */
+                       if(((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
+                               config1val = read_vpe_c0_config1();
+                               tlbsiz += ((config1val >> 25) & 0x3f) + 1;
+                       }
+
+                       /* Put core back in configuration state */
+                       write_c0_mvpcontrol(
+                               read_c0_mvpcontrol() | MVPCONTROL_VPC );
+                       mips_ihb();
+                   }
+               }
+               write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
+
+               /*
+                * Set up kernel data structures to use the software total,
+                * rather than reading the per-VPE Config1 value. The values
+                * for "CPU 0" get copied to all the other CPUs as part
+                * of their initialization in smtc_cpu_setup().
+                */
+
+               tlbsiz = tlbsiz & 0x3f; /* MIPS32 limits TLB indices to 64 */
+               cpu_data[0].tlbsize = tlbsiz;
+               smtc_status |= SMTC_TLB_SHARED;
+
+               printk("TLB of %d entry pairs shared by %d VPEs\n",
+                       tlbsiz, vpes);
+           } else {
+               printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
+           }
+       }
+}
+
+
+/*
+ * Incrementally build the CPU map out of constituent MIPS MT cores,
+ * using the specified available VPEs and TCs.  Platform code needs
+ * to ensure that each MIPS MT core invokes this routine on reset,
+ * one at a time(!).
+ *
+ * This version of the build_cpu_map and prepare_cpus routines assumes
+ * that *all* TCs of a MIPS MT core will be used for Linux, and that
+ * they will be spread across *all* available VPEs (to minimise the
+ * loss of efficiency due to exception service serialization).
+ * An improved version would pick up configuration information and
+ * possibly leave some TCs/VPEs as "slave" processors.
+ *
+ * Use c0_MVPConf0 to find out how many TCs are available, setting up
+ * phys_cpu_present_map and the logical/physical mappings.
+ */
+
+int __init mipsmt_build_cpu_map(int start_cpu_slot)
+{
+       int i, ntcs;
+
+       /*
+        * The CPU map isn't actually used for anything at this point,
+        * so it's not clear what else we should do apart from set
+        * everything up so that "logical" = "physical".
+        */
+       ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+       for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
+               cpu_set(i, phys_cpu_present_map);
+               __cpu_number_map[i] = i;
+               __cpu_logical_map[i] = i;
+       }
+       /* Initialize map of CPUs with FPUs */
+       cpus_clear(mt_fpu_cpumask);
+
+       /* One of those TC's is the one booting, and not a secondary... */
+       printk("%i available secondary CPU TC(s)\n", i - 1);
+
+       return i;
+}
+
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPU's are in a sensible state before we boot any of the
+ * secondaries.
+ *
+ * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
+ * as possible across the available VPEs.
+ */
+
+static void smtc_tc_setup(int vpe, int tc, int cpu)
+{
+       settc(tc);
+       write_tc_c0_tchalt(TCHALT_H);
+       mips_ihb();
+       write_tc_c0_tcstatus((read_tc_c0_tcstatus()
+                       & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
+                       | TCSTATUS_A);
+       write_tc_c0_tccontext(0);
+       /* Bind tc to vpe */
+       write_tc_c0_tcbind(vpe);
+       /* In general, all TCs should have the same cpu_data indications */
+       memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
+       /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
+       if (cpu_data[0].cputype == CPU_34K)
+               cpu_data[cpu].options &= ~MIPS_CPU_FPU;
+       cpu_data[cpu].vpe_id = vpe;
+       cpu_data[cpu].tc_id = tc;
+}
+
+
+void mipsmt_prepare_cpus(void)
+{
+       int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
+       unsigned long flags;
+       unsigned long val;
+       int nipi;
+       struct smtc_ipi *pipi;
+
+       /* disable interrupts so we can disable MT */
+       local_irq_save(flags);
+       /* disable MT so we can configure */
+       dvpe();
+       dmt();
+
+       freeIPIq.lock = SPIN_LOCK_UNLOCKED;
+
+       /*
+        * We probably don't have as many VPEs as we do SMP "CPUs",
+        * but it's possible - and in any case we'll never use more!
+        */
+       for (i=0; i<NR_CPUS; i++) {
+               IPIQ[i].head = IPIQ[i].tail = NULL;
+               IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
+               IPIQ[i].depth = 0;
+               ipi_timer_latch[i] = 0;
+       }
+
+       /* cpu_data index starts at zero */
+       cpu = 0;
+       cpu_data[cpu].vpe_id = 0;
+       cpu_data[cpu].tc_id = 0;
+       cpu++;
+
+       /* Report on boot-time options */
+       mips_mt_set_cpuoptions ();
+       if (vpelimit > 0)
+               printk("Limit of %d VPEs set\n", vpelimit);
+       if (tclimit > 0)
+               printk("Limit of %d TCs set\n", tclimit);
+       if (nostlb) {
+               printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
+       }
+       if (asidmask)
+               printk("ASID mask value override to 0x%x\n", asidmask);
+
+       /* Temporary */
+#ifdef SMTC_IDLE_HOOK_DEBUG
+       if (hang_trig)
+               printk("Logic Analyser Trigger on suspected TC hang\n");
+#endif /* SMTC_IDLE_HOOK_DEBUG */
+
+       /* Put MVPE's into 'configuration state' */
+       write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
+
+       val = read_c0_mvpconf0();
+       nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+       if (vpelimit > 0 && nvpe > vpelimit)
+               nvpe = vpelimit;
+       ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+       if (ntc > NR_CPUS)
+               ntc = NR_CPUS;
+       if (tclimit > 0 && ntc > tclimit)
+               ntc = tclimit;
+       tcpervpe = ntc / nvpe;
+       slop = ntc % nvpe;      /* Residual TCs, < NVPE */
+
+       /* Set up shared TLB */
+       smtc_configure_tlb();
+
+       for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
+               /*
+                * Set the MVP bits.
+                */
+               settc(tc);
+               write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
+               if (vpe != 0)
+                       printk(", ");
+               printk("VPE %d: TC", vpe);
+               for (i = 0; i < tcpervpe; i++) {
+                       /*
+                        * TC 0 is bound to VPE 0 at reset,
+                        * and is presumably executing this
+                        * code.  Leave it alone!
+                        */
+                       if (tc != 0) {
+                               smtc_tc_setup(vpe,tc, cpu);
+                               cpu++;
+                       }
+                       printk(" %d", tc);
+                       tc++;
+               }
+               if (slop) {
+                       if (tc != 0) {
+                               smtc_tc_setup(vpe,tc, cpu);
+                               cpu++;
+                       }
+                       printk(" %d", tc);
+                       tc++;
+                       slop--;
+               }
+               if (vpe != 0) {
+                       /*
+                        * Clear any stale software interrupts from VPE's Cause
+                        */
+                       write_vpe_c0_cause(0);
+
+                       /*
+                        * Clear ERL/EXL of VPEs other than 0
+                        * and set restricted interrupt enable/mask.
+                        */
+                       write_vpe_c0_status((read_vpe_c0_status()
+                               & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
+                               | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
+                               | ST0_IE));
+                       /*
+                        * set config to be the same as vpe0,
+                        *  particularly kseg0 coherency alg
+                        */
+                       write_vpe_c0_config(read_c0_config());
+                       /* Clear any pending timer interrupt */
+                       write_vpe_c0_compare(0);
+                       /* Propagate Config7 */
+                       write_vpe_c0_config7(read_c0_config7());
+               }
+               /* enable multi-threading within VPE */
+               write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
+               /* enable the VPE */
+               write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+       }
+
+       /*
+        * Pull any physically present but unused TCs out of circulation.
+        */
+       while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
+               cpu_clear(tc, phys_cpu_present_map);
+               cpu_clear(tc, cpu_present_map);
+               tc++;
+       }
+
+       /* release config state */
+       write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
+
+       printk("\n");
+
+       /* Set up coprocessor affinity CPU mask(s) */
+
+       for (tc = 0; tc < ntc; tc++) {
+               if(cpu_data[tc].options & MIPS_CPU_FPU)
+                       cpu_set(tc, mt_fpu_cpumask);
+       }
+
+       /* set up ipi interrupts... */
+
+       /* If we have multiple VPEs running, set up the cross-VPE interrupt */
+
+       if (nvpe > 1)
+               setup_cross_vpe_interrupts();
+
+       /* Set up queue of free IPI "messages". */
+       nipi = NR_CPUS * IPIBUF_PER_CPU;
+       if (ipibuffers > 0)
+               nipi = ipibuffers;
+
+       pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
+       if (pipi == NULL)
+               panic("kmalloc of IPI message buffers failed\n");
+       else
+               printk("IPI buffer pool of %d buffers\n", nipi);
+       for (i = 0; i < nipi; i++) {
+               smtc_ipi_nq(&freeIPIq, pipi);
+               pipi++;
+       }
+
+       /* Arm multithreading and enable other VPEs - but all TCs are Halted */
+       emt(EMT_ENABLE);
+       evpe(EVPE_ENABLE);
+       local_irq_restore(flags);
+       /* Initialize SMTC /proc statistics/diagnostics */
+       init_smtc_stats();
+}
+
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info is the gp
+ *
+ */
+void smtc_boot_secondary(int cpu, struct task_struct *idle)
+{
+       extern u32 kernelsp[NR_CPUS];
+       long flags;
+       int mtflags;
+
+       LOCK_MT_PRA();
+       if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+               dvpe();
+       }
+       settc(cpu_data[cpu].tc_id);
+
+       /* pc */
+       write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
+
+       /* stack pointer */
+       kernelsp[cpu] = __KSTK_TOS(idle);
+       write_tc_gpr_sp(__KSTK_TOS(idle));
+
+       /* global pointer */
+       write_tc_gpr_gp((unsigned long)idle->thread_info);
+
+       smtc_status |= SMTC_MTC_ACTIVE;
+       write_tc_c0_tchalt(0);
+       if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+               evpe(EVPE_ENABLE);
+       }
+       UNLOCK_MT_PRA();
+}
+
+void smtc_init_secondary(void)
+{
+       /*
+        * Start timer on secondary VPEs if necessary.
+        * mips_timer_setup should already have been invoked by init/main
+        * on "boot" TC.  Like per_cpu_trap_init() hack, this assumes that
+        * SMTC init code assigns TCs consecutively and in ascending order
+        * across available VPEs.
+        */
+       if(((read_c0_tcbind() & TCBIND_CURTC) != 0)
+       && ((read_c0_tcbind() & TCBIND_CURVPE)
+           != cpu_data[smp_processor_id() - 1].vpe_id)){
+               write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
+       }
+
+       local_irq_enable();
+}
+
+void smtc_smp_finish(void)
+{
+       printk("TC %d going on-line as CPU %d\n",
+               cpu_data[smp_processor_id()].tc_id, smp_processor_id());
+}
+
+void smtc_cpus_done(void)
+{
+}
+
+/*
+ * Support for SMTC-optimized driver IRQ registration
+ */
+
+/*
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
+ */
+
+int setup_irq_smtc(unsigned int irq, struct irqaction * new,
+                       unsigned long hwmask)
+{
+       irq_hwmask[irq] = hwmask;
+
+       return setup_irq(irq, new);
+}
+
+/*
+ * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
+ * Within a VPE one TC can interrupt another by different approaches.
+ * The easiest to get right would probably be to make all TCs except
+ * the target IXMT and set a software interrupt, but an IXMT-based
+ * scheme requires that a handler must run before a new IPI could
+ * be sent, which would break the "broadcast" loops in MIPS MT.
+ * A more gonzo approach within a VPE is to halt the TC, extract
+ * its Restart, Status, and a couple of GPRs, and program the Restart
+ * address to emulate an interrupt.
+ *
+ * Within a VPE, one can be confident that the target TC isn't in
+ * a critical EXL state when halted, since the write to the Halt
+ * register could not have issued on the writing thread if the
+ * halting thread had EXL set. So k0 and k1 of the target TC
+ * can be used by the injection code.  Across VPEs, one can't
+ * be certain that the target TC isn't in a critical exception
+ * state. So we try a two-step process of sending a software
+ * interrupt to the target VPE, which either handles the event
+ * itself (if it was the target) or injects the event within
+ * the VPE.
+ */
+
+void smtc_ipi_qdump(void)
+{
+       int i;
+
+       for (i = 0; i < NR_CPUS ;i++) {
+               printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
+                       i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
+                       IPIQ[i].depth);
+       }
+}
+
+/*
+ * The standard atomic.h primitives don't quite do what we want
+ * here: We need an atomic add-and-return-previous-value (which
+ * could be done with atomic_add_return and a decrement) and an
+ * atomic set/zero-and-return-previous-value (which can't really
+ * be done with the atomic.h primitives). And since this is
+ * MIPS MT, we can assume that we have LL/SC.
+ */
+static __inline__ int atomic_postincrement(unsigned int *pv)
+{
+       unsigned long result;
+
+       unsigned long temp;
+
+       __asm__ __volatile__(
+       "1:     ll      %0, %2                                  \n"
+       "       addu    %1, %0, 1                               \n"
+       "       sc      %1, %2                                  \n"
+       "       beqz    %1, 1b                                  \n"
+       "       sync                                            \n"
+       : "=&r" (result), "=&r" (temp), "=m" (*pv)
+       : "m" (*pv)
+       : "memory");
+
+       return result;
+}
+
+/* No longer used in IPI dispatch, but retained for future recycling */
+
+static __inline__ int atomic_postclear(unsigned int *pv)
+{
+       unsigned long result;
+
+       unsigned long temp;
+
+       __asm__ __volatile__(
+       "1:     ll      %0, %2                                  \n"
+       "       or      %1, $0, $0                              \n"
+       "       sc      %1, %2                                  \n"
+       "       beqz    %1, 1b                                  \n"
+       "       sync                                            \n"
+       : "=&r" (result), "=&r" (temp), "=m" (*pv)
+       : "m" (*pv)
+       : "memory");
+
+       return result;
+}
+
+
+void smtc_send_ipi(int cpu, int type, unsigned int action)
+{
+       int tcstatus;
+       struct smtc_ipi *pipi;
+       long flags;
+       int mtflags;
+
+       if (cpu == smp_processor_id()) {
+               printk("Cannot Send IPI to self!\n");
+               return;
+       }
+       /* Set up a descriptor, to be delivered either promptly or queued */
+       pipi = smtc_ipi_dq(&freeIPIq);
+       if (pipi == NULL) {
+               bust_spinlocks(1);
+               mips_mt_regdump(dvpe());
+               panic("IPI Msg. Buffers Depleted\n");
+       }
+       pipi->type = type;
+       pipi->arg = (void *)action;
+       pipi->dest = cpu;
+       if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+               /* If not on same VPE, enqueue and send cross-VPE interrupt */
+               smtc_ipi_nq(&IPIQ[cpu], pipi);
+               LOCK_CORE_PRA();
+               settc(cpu_data[cpu].tc_id);
+               write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
+               UNLOCK_CORE_PRA();
+       } else {
+               /*
+                * Not sufficient to do a LOCK_MT_PRA (dmt) here,
+                * since ASID shootdown on the other VPE may
+                * collide with this operation.
+                */
+               LOCK_CORE_PRA();
+               settc(cpu_data[cpu].tc_id);
+               /* Halt the targeted TC */
+               write_tc_c0_tchalt(TCHALT_H);
+               mips_ihb();
+
+               /*
+                * Inspect TCStatus - if IXMT is set, we have to queue
+                * a message. Otherwise, we set up the "interrupt"
+                * of the other TC
+                */
+               tcstatus = read_tc_c0_tcstatus();
+
+               if ((tcstatus & TCSTATUS_IXMT) != 0) {
+                       /*
+                        * Spin-waiting here can deadlock,
+                        * so we queue the message for the target TC.
+                        */
+                       write_tc_c0_tchalt(0);
+                       UNLOCK_CORE_PRA();
+                       /* Try to reduce redundant timer interrupt messages */
+                       if(type == SMTC_CLOCK_TICK) {
+                           if(atomic_postincrement(&ipi_timer_latch[cpu])!=0) {
+                               smtc_ipi_nq(&freeIPIq, pipi);
+                               return;
+                           }
+                       }
+                       smtc_ipi_nq(&IPIQ[cpu], pipi);
+               } else {
+                       post_direct_ipi(cpu, pipi);
+                       write_tc_c0_tchalt(0);
+                       UNLOCK_CORE_PRA();
+               }
+       }
+}
+
+/*
+ * Send IPI message to Halted TC, TargTC/TargVPE already having been set
+ */
+void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
+{
+       struct pt_regs *kstack;
+       unsigned long tcstatus;
+       unsigned long tcrestart;
+       extern u32 kernelsp[NR_CPUS];
+       extern void __smtc_ipi_vector(void);
+
+       /* Extract Status, EPC from halted TC */
+       tcstatus = read_tc_c0_tcstatus();
+       tcrestart = read_tc_c0_tcrestart();
+       /* If TCRestart indicates a WAIT instruction, advance the PC */
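+       /* (the sign-bit test ensures a kseg address; 0x42000020 is the WAIT opcode) */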
+       if ((tcrestart & 0x80000000)
+           && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
+               tcrestart += 4;
+       }
+       /*
+        * Save on TC's future kernel stack
+        *
+        * CU bit of Status is an indicator that TC was
+        * already running on a kernel stack...
+        */
+       if(tcstatus & ST0_CU0)  {
+               /* Note that this "- 1" is pointer arithmetic */
+               kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
+       } else {
+               kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
+       }
+
+       kstack->cp0_epc = (long)tcrestart;
+       /* Save TCStatus */
+       kstack->cp0_tcstatus = tcstatus;
+       /* Pass token of operation to be performed in kernel stack pad area */
+       kstack->pad0[4] = (unsigned long)pipi;
+       /* Pass address of function to be called likewise */
+       kstack->pad0[5] = (unsigned long)&ipi_decode;
+       /* Set interrupt exempt and kernel mode */
+       tcstatus |= TCSTATUS_IXMT;
+       tcstatus &= ~TCSTATUS_TKSU;
+       write_tc_c0_tcstatus(tcstatus);
+       ehb();
+       /* Set TC Restart address to be SMTC IPI vector */
+       write_tc_c0_tcrestart(__smtc_ipi_vector);
+}
+
+void ipi_resched_interrupt(struct pt_regs *regs)
+{
+       /* Return from interrupt should be enough to cause scheduler check */
+}
+
+
+void ipi_call_interrupt(struct pt_regs *regs)
+{
+       /* Invoke generic function invocation code in smp.c */
+       smp_call_function_interrupt();
+}
+
+void ipi_decode(struct pt_regs *regs, struct smtc_ipi *pipi)
+{
+       void *arg_copy = pipi->arg;
+       int type_copy = pipi->type;
+       int dest_copy = pipi->dest;
+
+       smtc_ipi_nq(&freeIPIq, pipi);
+       switch (type_copy) {
+               case SMTC_CLOCK_TICK:
+                       /* Invoke Clock "Interrupt" */
+                       ipi_timer_latch[dest_copy] = 0;
+#ifdef SMTC_IDLE_HOOK_DEBUG
+                       clock_hang_reported[dest_copy] = 0;
+#endif /* SMTC_IDLE_HOOK_DEBUG */
+                       local_timer_interrupt(0, NULL, regs);
+                       break;
+               case LINUX_SMP_IPI:
+                       switch ((int)arg_copy) {
+                       case SMP_RESCHEDULE_YOURSELF:
+                               ipi_resched_interrupt(regs);
+                               break;
+                       case SMP_CALL_FUNCTION:
+                               ipi_call_interrupt(regs);
+                               break;
+                       default:
+                               printk("Impossible SMTC IPI Argument 0x%x\n",
+                                       (int)arg_copy);
+                               break;
+                       }
+                       break;
+               default:
+                       printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
+                       break;
+       }
+}
+
+void deferred_smtc_ipi(struct pt_regs *regs)
+{
+       struct smtc_ipi *pipi;
+       unsigned long flags;
+/* DEBUG */
+       int q = smp_processor_id();
+
+       /*
+        * Test is not atomic, but much faster than a dequeue,
+        * and the vast majority of invocations will have a null queue.
+        */
+       if(IPIQ[q].head != NULL) {
+               while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
+                       /* ipi_decode() should be called with interrupts off */
+                       local_irq_save(flags);
+                       ipi_decode(regs, pipi);
+                       local_irq_restore(flags);
+               }
+       }
+}
+
+/*
+ * Send clock tick to all TCs except the one executing the function
+ */
+
+void smtc_timer_broadcast(int vpe)
+{
+       int cpu;
+       int myTC = cpu_data[smp_processor_id()].tc_id;
+       int myVPE = cpu_data[smp_processor_id()].vpe_id;
+
+       smtc_cpu_stats[smp_processor_id()].timerints++;
+
+       for_each_online_cpu(cpu) {
+               if (cpu_data[cpu].vpe_id == myVPE &&
+                   cpu_data[cpu].tc_id != myTC)
+                       smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+       }
+}
+
+/*
+ * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
+ * set via cross-VPE MTTR manipulation of the Cause register. It would be
+ * in some regards preferable to have external logic for "doorbell" hardware
+ * interrupts.
+ */
+
+static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_idm, struct pt_regs *regs)
+{
+       int my_vpe = cpu_data[smp_processor_id()].vpe_id;
+       int my_tc = cpu_data[smp_processor_id()].tc_id;
+       int cpu;
+       struct smtc_ipi *pipi;
+       unsigned long tcstatus;
+       int sent;
+       long flags;
+       unsigned int mtflags;
+       unsigned int vpflags;
+
+       /*
+        * So long as cross-VPE interrupts are done via
+        * MFTR/MTTR read-modify-writes of Cause, we need
+        * to stop other VPEs whenever the local VPE does
+        * anything similar.
+        */
+       local_irq_save(flags);
+       vpflags = dvpe();
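+       /* Ack the pending SW1 (IP1) bit in Cause and keep its IM bit enabled */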
+       clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
+       set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
+       irq_enable_hazard();
+       evpe(vpflags);
+       local_irq_restore(flags);
+
+       /*
+        * Cross-VPE Interrupt handler: Try to directly deliver IPIs
+        * queued for TCs on this VPE other than the current one.
+        * Return-from-interrupt should cause us to drain the queue
+        * for the current TC, so we ought not to have to do it explicitly here.
+        */
+
+       for_each_online_cpu(cpu) {
+               if (cpu_data[cpu].vpe_id != my_vpe)
+                       continue;
+
+               pipi = smtc_ipi_dq(&IPIQ[cpu]);
+               if (pipi != NULL) {
+                       if (cpu_data[cpu].tc_id != my_tc) {
+                               sent = 0;
+                               LOCK_MT_PRA();
+                               settc(cpu_data[cpu].tc_id);
+                               write_tc_c0_tchalt(TCHALT_H);
+                               mips_ihb();
+                               tcstatus = read_tc_c0_tcstatus();
+                               if ((tcstatus & TCSTATUS_IXMT) == 0) {
+                                       post_direct_ipi(cpu, pipi);
+                                       sent = 1;
+                               }
+                               write_tc_c0_tchalt(0);
+                               UNLOCK_MT_PRA();
+                               if (!sent) {
+                                       smtc_ipi_req(&IPIQ[cpu], pipi);
+                               }
+                       } else {
+                               /*
+                                * ipi_decode() should be called
+                                * with interrupts off
+                                */
+                               local_irq_save(flags);
+                               ipi_decode(regs, pipi);
+                               local_irq_restore(flags);
+                       }
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void ipi_irq_dispatch(struct pt_regs *regs)
+{
+       do_IRQ(cpu_ipi_irq, regs);
+}
+
+static struct irqaction irq_ipi;
+
+void setup_cross_vpe_interrupts(void)
+{
+       if (!cpu_has_vint)
+               panic("SMTC Kernel requires Vectored Interupt support");
+
+       set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
+
+       irq_ipi.handler = ipi_interrupt;
+       irq_ipi.flags = SA_INTERRUPT;
+       irq_ipi.name = "SMTC_IPI";
+
+       setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
+
+       irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
+}
+
+/*
+ * SMTC-specific hacks invoked from elsewhere in the kernel.
+ */
+
+void smtc_idle_loop_hook(void)
+{
+#ifdef SMTC_IDLE_HOOK_DEBUG
+       int im;
+       int flags;
+       int mtflags;
+       int bit;
+       int vpe;
+       int tc;
+       int hook_ntcs;
+       /*
+        * printk within DMT-protected regions can deadlock,
+        * so buffer diagnostic messages for later output.
+        */
+       char *pdb_msg;
+       char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
+
+       if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
+               if (atomic_add_return(1, &idle_hook_initialized) == 1) {
+                       int mvpconf0;
+                       /* Tedious stuff to just do once */
+                       mvpconf0 = read_c0_mvpconf0();
+                       hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+                       if (hook_ntcs > NR_CPUS)
+                               hook_ntcs = NR_CPUS;
+                       for (tc = 0; tc < hook_ntcs; tc++) {
+                               tcnoprog[tc] = 0;
+                               clock_hang_reported[tc] = 0;
+                       }
+                       for (vpe = 0; vpe < 2; vpe++)
+                               for (im = 0; im < 8; im++)
+                                       imstuckcount[vpe][im] = 0;
+                       printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
+                       atomic_set(&idle_hook_initialized, 1000);
+               } else {
+                       /* Someone else is initializing in parallel - let 'em finish */
+                       while (atomic_read(&idle_hook_initialized) < 1000)
+                               ;
+               }
+       }
+
+       /* Have we stupidly left IXMT set somewhere? */
+       if (read_c0_tcstatus() & 0x400) {
+               write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
+               ehb();
+               printk("Dangling IXMT in cpu_idle()\n");
+       }
+
+       /* Have we stupidly left an IM bit turned off? */
+#define IM_LIMIT 2000
+       local_irq_save(flags);
+       mtflags = dmt();
+       pdb_msg = &id_ho_db_msg[0];
+       im = read_c0_status();
+       vpe = cpu_data[smp_processor_id()].vpe_id;
+       for (bit = 0; bit < 8; bit++) {
+               /*
+                * In current prototype, I/O interrupts
+                * are masked for VPE > 0
+                */
+               if (vpemask[vpe][bit]) {
+                       if (!(im & (0x100 << bit)))
+                               imstuckcount[vpe][bit]++;
+                       else
+                               imstuckcount[vpe][bit] = 0;
+                       if (imstuckcount[vpe][bit] > IM_LIMIT) {
+                               set_c0_status(0x100 << bit);
+                               ehb();
+                               imstuckcount[vpe][bit] = 0;
+                               pdb_msg += sprintf(pdb_msg,
+                                       "Dangling IM %d fixed for VPE %d\n", bit,
+                                       vpe);
+                       }
+               }
+       }
+
+       /*
+        * Now that we limit outstanding timer IPIs, check for hung TC
+        */
+       for (tc = 0; tc < NR_CPUS; tc++) {
+               /* Don't check ourself - we'll dequeue IPIs just below */
+               if ((tc != smp_processor_id()) &&
+                   ipi_timer_latch[tc] > timerq_limit) {
+                   if (clock_hang_reported[tc] == 0) {
+                       pdb_msg += sprintf(pdb_msg,
+                               "TC %d looks hung with timer latch at %d\n",
+                               tc, ipi_timer_latch[tc]);
+                       clock_hang_reported[tc]++;
+                       }
+               }
+       }
+       emt(mtflags);
+       local_irq_restore(flags);
+       if (pdb_msg != &id_ho_db_msg[0])
+               printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
+#endif /* SMTC_IDLE_HOOK_DEBUG */
+       /*
+        * To the extent that we've ever turned interrupts off,
+        * we may have accumulated deferred IPIs.  This is subtle.
+        * If we use the smtc_ipi_qdepth() macro, we'll get an
+        * exact number - but we'll also disable interrupts
+        * and create a window of failure where a new IPI gets
+        * queued after we test the depth but before we re-enable
+        * interrupts. So long as IXMT never gets set, however,
+        * we should be OK:  If we pick up something and dispatch
+        * it here, that's great. If we see nothing, but concurrent
+        * with this operation, another TC sends us an IPI, IXMT
+        * is clear, and we'll handle it as a real pseudo-interrupt
+        * and not a pseudo-pseudo interrupt.
+        */
+       if (IPIQ[smp_processor_id()].depth > 0) {
+               struct smtc_ipi *pipi;
+               extern void self_ipi(struct smtc_ipi *);
+
+               if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
+                       self_ipi(pipi);
+                       smtc_cpu_stats[smp_processor_id()].selfipis++;
+               }
+       }
+}
+
+void smtc_soft_dump(void)
+{
+       int i;
+
+       printk("Counter Interrupts taken per CPU (TC)\n");
+       for (i=0; i < NR_CPUS; i++) {
+               printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
+       }
+       printk("Self-IPI invocations:\n");
+       for (i=0; i < NR_CPUS; i++) {
+               printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
+       }
+       smtc_ipi_qdump();
+       printk("Timer IPI Backlogs:\n");
+       for (i=0; i < NR_CPUS; i++) {
+               printk("%d: %d\n", i, ipi_timer_latch[i]);
+       }
+       printk("%d Recoveries of \"stolen\" FPU\n",
+              atomic_read(&smtc_fpu_recoveries));
+}
+
+
+/*
+ * TLB management routines special to SMTC
+ */
+
+void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+{
+       unsigned long flags, mtflags, tcstat, prevhalt, asid;
+       int tlb, i;
+
+       /*
+        * It would be nice to be able to use a spinlock here,
+        * but this is invoked from within TLB flush routines
+        * that protect themselves with DVPE, so if a lock is
+        * held by another TC, it'll never be freed.
+        *
+        * DVPE/DMT must not be done with interrupts enabled,
+        * so even though most callers will already have disabled
+        * them, let's be really careful...
+        */
+
+       local_irq_save(flags);
+       if (smtc_status & SMTC_TLB_SHARED) {
+               mtflags = dvpe();
+               tlb = 0;
+       } else {
+               mtflags = dmt();
+               tlb = cpu_data[cpu].vpe_id;
+       }
+       asid = asid_cache(cpu);
+
+       do {
+               if (!((asid += ASID_INC) & ASID_MASK) ) {
+                       if (cpu_has_vtag_icache)
+                               flush_icache_all();
+                       /* Traverse all online CPUs (hack requires contiguous range) */
+                       for (i = 0; i < num_online_cpus(); i++) {
+                               /*
+                                * We don't need to worry about our own CPU, nor those of
+                                * CPUs who don't share our TLB.
+                                */
+                               if ((i != smp_processor_id()) &&
+                                   ((smtc_status & SMTC_TLB_SHARED) ||
+                                    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
+                                       settc(cpu_data[i].tc_id);
+                                       prevhalt = read_tc_c0_tchalt() & TCHALT_H;
+                                       if (!prevhalt) {
+                                               write_tc_c0_tchalt(TCHALT_H);
+                                               mips_ihb();
+                                       }
+                                       tcstat = read_tc_c0_tcstatus();
+                                       smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
+                                       if (!prevhalt)
+                                               write_tc_c0_tchalt(0);
+                               }
+                       }
+                       if (!asid)              /* fix version if needed */
+                               asid = ASID_FIRST_VERSION;
+                       local_flush_tlb_all();  /* start new asid cycle */
+               }
+       } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
+
+       /*
+        * SMTC shares the TLB within VPEs and possibly across all VPEs.
+        */
+       for (i = 0; i < num_online_cpus(); i++) {
+               if ((smtc_status & SMTC_TLB_SHARED) ||
+                   (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
+                       cpu_context(i, mm) = asid_cache(i) = asid;
+       }
+
+       if (smtc_status & SMTC_TLB_SHARED)
+               evpe(mtflags);
+       else
+               emt(mtflags);
+       local_irq_restore(flags);
+}
+
+/*
+ * Invoked from macros defined in mmu_context.h
+ * which must already have disabled interrupts
+ * and done a DVPE or DMT as appropriate.
+ */
+
+void smtc_flush_tlb_asid(unsigned long asid)
+{
+       int entry;
+       unsigned long ehi;
+
+       entry = read_c0_wired();
+
+       /* Traverse all non-wired entries */
+       while (entry < current_cpu_data.tlbsize) {
+               write_c0_index(entry);
+               ehb();
+               tlb_read();
+               ehb();
+               ehi = read_c0_entryhi();
+               if((ehi & ASID_MASK) == asid) {
+                   /*
+                    * Invalidate only entries with specified ASID,
+                    * making sure all entries differ.
+                    */
+                   write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
+                   write_c0_entrylo0(0);
+                   write_c0_entrylo1(0);
+                   mtc0_tlbw_hazard();
+                   tlb_write_indexed();
+               }
+               entry++;
+       }
+       write_c0_index(PARKED_INDEX);
+       tlbw_use_hazard();
+}
+
+/*
+ * Support for single-threading cache flush operations.
+ */
+
+int halt_state_save[NR_CPUS];
+
+/*
+ * To really, really be sure that nothing is being done
+ * by other TCs, halt them all.  This code assumes that
+ * a DVPE has already been done, so while their Halted
+ * state is theoretically architecturally unstable, in
+ * practice, it's not going to change while we're looking
+ * at it.
+ */
+
+void smtc_cflush_lockdown(void)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               if (cpu != smp_processor_id()) {
+                       settc(cpu_data[cpu].tc_id);
+                       halt_state_save[cpu] = read_tc_c0_tchalt();
+                       write_tc_c0_tchalt(TCHALT_H);
+               }
+       }
+       mips_ihb();
+}
+
+/* It would be cheating to change the cpu_online states during a flush! */
+
+void smtc_cflush_release(void)
+{
+       int cpu;
+
+       /*
+        * Start with a hazard barrier to ensure
+        * that all CACHE ops have played through.
+        */
+       mips_ihb();
+
+       for_each_online_cpu(cpu) {
+               if (cpu != smp_processor_id()) {
+                       settc(cpu_data[cpu].tc_id);
+                       write_tc_c0_tchalt(halt_state_save[cpu]);
+               }
+       }
+       mips_ihb();
+}
index 5e51a2d8f3f0712072e59a2a53f01a46b9880453..13ff4da598cdfbcff3b2d34677c0e89cd3b55e28 100644 (file)
@@ -116,8 +116,7 @@ static void c0_timer_ack(void)
        write_c0_compare(expirelo);
 
        /* Check to see if we have missed any timer interrupts.  */
-       count = read_c0_count();
-       if ((count - expirelo) < 0x7fffffff) {
+       while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
                /* missed_timer_count++; */
                expirelo = count + cycles_per_jiffy;
                write_c0_compare(expirelo);
index 081e6ed5bb6222220ddd0c6a39d79da64fa43154..6336fe8008ec6443d29f39644d8309f72b66daf7 100644 (file)
@@ -280,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock);
 NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
 {
        static int die_counter;
+#ifdef CONFIG_MIPS_MT_SMTC
+       unsigned long dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
 
        console_verbose();
        spin_lock_irq(&die_lock);
+       bust_spinlocks(1);
+#ifdef CONFIG_MIPS_MT_SMTC
+       mips_mt_regdump(dvpret);
+#endif /* CONFIG_MIPS_MT_SMTC */
        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        spin_unlock_irq(&die_lock);
@@ -757,6 +764,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
        case 2:
        case 3:
+               die_if_kernel("do_cpu invoked from kernel context!", regs);
                break;
        }
 
@@ -794,6 +802,36 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
 
 asmlinkage void do_mt(struct pt_regs *regs)
 {
+       int subcode;
+
+       die_if_kernel("MIPS MT Thread exception in kernel", regs);
+
+       subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
+                       >> VPECONTROL_EXCPT_SHIFT;
+       switch (subcode) {
+       case 0:
+               printk(KERN_ERR "Thread Underflow\n");
+               break;
+       case 1:
+               printk(KERN_ERR "Thread Overflow\n");
+               break;
+       case 2:
+               printk(KERN_ERR "Invalid YIELD Qualifier\n");
+               break;
+       case 3:
+               printk(KERN_ERR "Gating Storage Exception\n");
+               break;
+       case 4:
+               printk(KERN_ERR "YIELD Scheduler Exception\n");
+               break;
+       case 5:
+               printk(KERN_ERR "Gating Storage Schedulier Exception\n");
+               break;
+       default:
+               printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
+                       subcode);
+               break;
+       }
        die_if_kernel("MIPS MT Thread exception in kernel", regs);
 
        force_sig(SIGILL, current);
@@ -929,7 +967,15 @@ void ejtag_exception_handler(struct pt_regs *regs)
  */
 void nmi_exception_handler(struct pt_regs *regs)
 {
+#ifdef CONFIG_MIPS_MT_SMTC
+       unsigned long dvpret = dvpe();
+       bust_spinlocks(1);
+       printk("NMI taken!!!!\n");
+       mips_mt_regdump(dvpret);
+#else
+       bust_spinlocks(1);
        printk("NMI taken!!!!\n");
+#endif /* CONFIG_MIPS_MT_SMTC */
        die("NMI", regs);
        while(1) ;
 }
@@ -1007,7 +1053,7 @@ again:
        return set;
 }
 
-void mips_srs_free (int set)
+void mips_srs_free(int set)
 {
        struct shadow_registers *sr = &shadow_registers;
 
@@ -1027,8 +1073,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
-       }
-       else
+       } else
                handler = (unsigned long) addr;
        vi_handlers[n] = (unsigned long) addr;
 
@@ -1040,8 +1085,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
        if (cpu_has_veic) {
                if (board_bind_eic_interrupt)
                        board_bind_eic_interrupt (n, srs);
-       }
-       else if (cpu_has_vint) {
+       } else if (cpu_has_vint) {
                /* SRSMap is only defined if shadow sets are implemented */
                if (mips_srs_max() > 1)
                        change_c0_srsmap (0xf << n*4, srs << n*4);
@@ -1055,6 +1099,15 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
 
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
+#ifdef CONFIG_MIPS_MT_SMTC
+               /*
+                * We need to provide the SMTC vectored interrupt handler
+                * not only with the address of the handler, but with the
+                * Status.IM bit to be masked before going there.
+                */
+               extern char except_vec_vi_mori;
+               const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
+#endif /* CONFIG_MIPS_MT_SMTC */
                const int handler_len = &except_vec_vi_end - &except_vec_vi;
                const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
                const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
@@ -1068,6 +1121,12 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
                }
 
                memcpy (b, &except_vec_vi, handler_len);
+#ifdef CONFIG_MIPS_MT_SMTC
+               if (n > 7)
+                       printk("Vector index %d exceeds SMTC maximum\n", n);
+               w = (u32 *)(b + mori_offset);
+               *w = (*w & 0xffff0000) | (0x100 << n);
+#endif /* CONFIG_MIPS_MT_SMTC */
                w = (u32 *)(b + lui_offset);
                *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
                w = (u32 *)(b + ori_offset);
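
For SMTC the stub copied into the vector area carries an extra ori (at except_vec_vi_mori) whose immediate is rewritten to the Status.IM bit of the interrupt being vectored. A worked example, illustrative only:

	/*
	 * For vector n == 2: 0x100 << 2 == 0x400 == STATUSF_IP2, so the patched
	 * word becomes (old_ori & 0xffff0000) | 0x0400, and the SMTC vectored
	 * handler knows to mask exactly that IM bit before dispatching.
	 */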
@@ -1090,7 +1149,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
        return (void *)old_handler;
 }
 
-void *set_vi_handler (int n, void *addr)
+void *set_vi_handler(int n, void *addr)
 {
        return set_vi_srs_handler(n, addr, 0);
 }
@@ -1108,8 +1167,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
 
+#ifdef CONFIG_SMP
+static int smp_save_fp_context(struct sigcontext *sc)
+{
+       return cpu_has_fpu
+              ? _save_fp_context(sc)
+              : fpu_emulator_save_context(sc);
+}
+
+static int smp_restore_fp_context(struct sigcontext *sc)
+{
+       return cpu_has_fpu
+              ? _restore_fp_context(sc)
+              : fpu_emulator_restore_context(sc);
+}
+#endif
+
 static inline void signal_init(void)
 {
+#ifdef CONFIG_SMP
+       /* For now just do the cpu_has_fpu check when the functions are invoked */
+       save_fp_context = smp_save_fp_context;
+       restore_fp_context = smp_restore_fp_context;
+#else
        if (cpu_has_fpu) {
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
@@ -1117,6 +1197,7 @@ static inline void signal_init(void)
                save_fp_context = fpu_emulator_save_context;
                restore_fp_context = fpu_emulator_restore_context;
        }
+#endif
 }
 
 #ifdef CONFIG_MIPS32_COMPAT
@@ -1153,6 +1234,20 @@ void __init per_cpu_trap_init(void)
 {
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
+#ifdef CONFIG_MIPS_MT_SMTC
+       int secondaryTC = 0;
+       int bootTC = (cpu == 0);
+
+       /*
+        * Only do per_cpu_trap_init() for first TC of Each VPE.
+        * Note that this hack assumes that the SMTC init code
+        * assigns TCs consecutively and in ascending order.
+        */
+
+       if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
+           ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
+               secondaryTC = 1;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
        /*
         * Disable coprocessors and select 32-bit or 64-bit addressing
@@ -1175,6 +1270,10 @@ void __init per_cpu_trap_init(void)
        write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
 #endif
 
+#ifdef CONFIG_MIPS_MT_SMTC
+       if (!secondaryTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+
        /*
         * Interrupt handling.
         */
@@ -1191,6 +1290,9 @@ void __init per_cpu_trap_init(void)
                } else
                        set_c0_cause(CAUSEF_IV);
        }
+#ifdef CONFIG_MIPS_MT_SMTC
+       }
+#endif /* CONFIG_MIPS_MT_SMTC */
 
        cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
        TLBMISS_HANDLER_SETUP();
@@ -1200,8 +1302,14 @@ void __init per_cpu_trap_init(void)
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
 
-       cpu_cache_init();
-       tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+       if (bootTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+               cpu_cache_init();
+               tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+       }
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /* Install CPU exception handler */
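
The SMTC conditionals above boil down to two tests: "am I the first TC on my VPE?" (gates the per-VPE interrupt setup) and "am I the boot TC?" (gates cache and TLB init). A hedged sketch of the first test factored into a helper, under the same assumption the comment states, that TCs are assigned consecutively and in ascending order:

	/* Hypothetical helper, not in the patch. */
	static int first_tc_on_vpe(unsigned int cpu)
	{
		unsigned int tcbind = read_c0_tcbind();

		if ((tcbind & TCBIND_CURTC) == 0)
			return 1;		/* TC 0 is first by definition */
		/* Otherwise: first iff the previous TC sits on a different VPE. */
		return (tcbind & TCBIND_CURVPE) != cpu_data[cpu - 1].vpe_id;
	}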
index 2ad0cedf29fee2236c32d7e1ec1cd3ac34b0b0ad..14fa00e3cdfa6007332e58fbd84fc9eba3c4d1ac 100644 (file)
@@ -2,7 +2,7 @@
 #include <asm/asm-offsets.h>
 #include <asm-generic/vmlinux.lds.h>
 
-#undef mips            /* CPP really sucks for this job  */
+#undef mips
 #define mips mips
 OUTPUT_ARCH(mips)
 ENTRY(kernel_entry)
index eab5a705e9892732635938f393690ed1c514eda1..17dfe6a8cab9b38b9b3620217953c57c6a118406 100644 (file)
@@ -220,7 +220,6 @@ void __init kgdb_config (void)
                                generic_putDebugChar (*s++);
                }
 
-               kgdb_enabled = 1;
                /* Breakpoint is invoked after interrupts are initialised */
        }
 }
index 93f3bf2c2b22364217e4fe9ae925b1e36b8470e0..a9f6124b3a227ee722288a5b239ddbfff3f821dc 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/mc146818rtc.h>
 
 #include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
 #include <asm/ptrace.h>
 #include <asm/hardirq.h>
 #include <asm/irq.h>
@@ -50,16 +51,23 @@ unsigned long cpu_khz;
 static char display_string[] = "        LINUX ON ATLAS       ";
 #endif
 #if defined(CONFIG_MIPS_MALTA)
+#if defined(CONFIG_MIPS_MT_SMTC)
+static char display_string[] = "       SMTC LINUX ON MALTA       ";
+#else
 static char display_string[] = "        LINUX ON MALTA       ";
+#endif /* CONFIG_MIPS_MT_SMTC */
 #endif
 #if defined(CONFIG_MIPS_SEAD)
 static char display_string[] = "        LINUX ON SEAD       ";
 #endif
-static unsigned int display_count = 0;
+static unsigned int display_count;
 #define MAX_DISPLAY_COUNT (sizeof(display_string) - 8)
 
-static unsigned int timer_tick_count=0;
+#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)
+
+static unsigned int timer_tick_count;
 static int mips_cpu_timer_irq;
+extern void smtc_timer_broadcast(int);
 
 static inline void scroll_display_message(void)
 {
@@ -75,15 +83,55 @@ static void mips_timer_dispatch (struct pt_regs *regs)
        do_IRQ (mips_cpu_timer_irq, regs);
 }
 
+/*
+ * Redeclare until I get around to mopping up the timer code insanity on MIPS.
+ */
 extern int null_perf_irq(struct pt_regs *regs);
 
 extern int (*perf_irq)(struct pt_regs *regs);
 
 irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-       int r2 = cpu_has_mips_r2;
        int cpu = smp_processor_id();
+       int r2 = cpu_has_mips_r2;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+        /*
+        *  In an SMTC system, one Count/Compare set exists per VPE.
+        *  Which TC within a VPE gets the interrupt is essentially
+        *  random - we only know that it shouldn't be one with
+        *  IXMT set. Whichever TC gets the interrupt needs to
+        *  send special interprocessor interrupts to the other
+        *  TCs to make sure that they schedule, etc.
+        *
+        *  That code is specific to the SMTC kernel, not to
+        *  a particular platform, so it's invoked from
+        *  the general MIPS timer_interrupt routine.
+        */
+
+       /*
+        * DVPE is necessary so long as cross-VPE interrupts
+        * are done via read-modify-write of Cause register.
+        */
+       int vpflags = dvpe();
+       write_c0_compare (read_c0_count() - 1);
+       clear_c0_cause(CPUCTR_IMASKBIT);
+       evpe(vpflags);
+
+       if (cpu_data[cpu].vpe_id == 0) {
+               timer_interrupt(irq, dev_id, regs);
+               scroll_display_message();
+       } else
+               write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
+       smtc_timer_broadcast(cpu_data[cpu].vpe_id);
 
+       if (cpu != 0)
+               /*
+                * Other CPUs should do profiling and process accounting
+                */
+               local_timer_interrupt(irq, dev_id, regs);
+
+#else /* CONFIG_MIPS_MT_SMTC */
        if (cpu == 0) {
                /*
                 * CPU 0 handles the global timer interrupt job and process
@@ -107,12 +155,14 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                 * More support needs to be added to kernel/time for
                 * counter/timer interrupts on multiple CPU's
                 */
-               write_c0_compare (read_c0_count() + (mips_hpt_frequency/HZ));
+               write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
+
                /*
-                * other CPUs should do profiling and process accounting
+                * Other CPUs should do profiling and process accounting
                 */
-               local_timer_interrupt (irq, dev_id, regs);
+               local_timer_interrupt(irq, dev_id, regs);
        }
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 out:
        return IRQ_HANDLED;
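
In the SMTC path only the TC that happened to take the per-VPE Count/Compare interrupt runs here; smtc_timer_broadcast() then has to nudge the sibling TCs so they also get their local tick. One plausible shape for that routine (its real body lives in arch/mips/kernel/smtc.c, which this page does not show; SMTC_CLOCK_TICK is assumed to be one of the IPI types from <asm/smtc_ipi.h>):

	void smtc_timer_broadcast(int vpe)
	{
		int cpu, self = smp_processor_id();

		for_each_online_cpu(cpu) {
			if (cpu_data[cpu].vpe_id == vpe && cpu != self)
				smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
		}
	}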
@@ -126,7 +176,7 @@ static unsigned int __init estimate_cpu_frequency(void)
        unsigned int prid = read_c0_prid() & 0xffff00;
        unsigned int count;
 
-#ifdef CONFIG_MIPS_SEAD
+#if defined(CONFIG_MIPS_SEAD) || defined(CONFIG_MIPS_SIM)
        /*
         * The SEAD board doesn't have a real time clock, so we can't
         * really calculate the timer frequency
@@ -211,7 +261,11 @@ void __init mips_timer_setup(struct irqaction *irq)
 
        /* we are using the cpu counter for timer interrupts */
        irq->handler = mips_timer_interrupt;    /* we use our own handler */
+#ifdef CONFIG_MIPS_MT_SMTC
+       setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
+#else
        setup_irq(mips_cpu_timer_irq, irq);
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 #ifdef CONFIG_SMP
        /* irq_desc(riptor) is a global resource, when the interrupt overlaps
index fd4c143c0e2f712cf0cf8c68e0b70a0fed93a53b..77ee5c6d33c119964da2619f5484cf83772e3b49 100644 (file)
@@ -20,3 +20,4 @@
 #
 
 obj-y := malta_int.o malta_setup.o
+obj-$(CONFIG_SMP) += malta_smp.o
index 1da8c18b9c8e8c397907372f5f06b4fe8f597f0f..64db07d4dbe500d5c3a66355ff1f19c9f6e9ec34 100644 (file)
@@ -118,8 +118,9 @@ static void malta_hw0_irqdispatch(struct pt_regs *regs)
        int irq;
 
        irq = get_int();
-       if (irq < 0)
+       if (irq < 0) {
                return;  /* interrupt has already been cleared */
+       }
 
        do_IRQ(MALTA_INT_BASE+irq, regs);
 }
@@ -324,9 +325,15 @@ void __init arch_init_irq(void)
        else if (cpu_has_vint) {
                set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
                set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch);
-
+#ifdef CONFIG_MIPS_MT_SMTC
+               setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq,
+                       (0x100 << MIPSCPU_INT_I8259A));
+               setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI,
+                       &corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI));
+#else /* Not SMTC */
                setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
                setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
+#endif /* CONFIG_MIPS_MT_SMTC */
        }
        else {
                setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
diff --git a/arch/mips/mips-boards/malta/malta_smp.c b/arch/mips/mips-boards/malta/malta_smp.c
new file mode 100644 (file)
index 0000000..6c6c8ee
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Malta Platform-specific hooks for SMP operation
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/atomic.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/smtc_ipi.h>
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+/* VPE/SMP Prototype implements platform interfaces directly */
+#if !defined(CONFIG_MIPS_MT_SMP)
+
+/*
+ * Cause the specified action to be performed on a targeted "CPU"
+ */
+
+void core_send_ipi(int cpu, unsigned int action)
+{
+/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
+#ifdef CONFIG_MIPS_MT_SMTC
+       smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
+#endif /* CONFIG_MIPS_MT_SMTC */
+}
+
+/*
+ * Detect available CPUs/VPEs/TCs and populate phys_cpu_present_map
+ */
+
+void __init prom_build_cpu_map(void)
+{
+       int nextslot;
+
+       /*
+        * As of November, 2004, MIPSsim only simulates one core
+        * at a time.  However, that core may be a MIPS MT core
+        * with multiple virtual processors and thread contexts.
+        */
+
+       if (read_c0_config3() & (1<<2)) {
+               nextslot = mipsmt_build_cpu_map(1);
+       }
+}
+
+/*
+ * Platform "CPU" startup hook
+ */
+
+void prom_boot_secondary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       smtc_boot_secondary(cpu, idle);
+#endif /* CONFIG_MIPS_MT_SMTC */
+}
+
+/*
+ * Post-config but pre-boot cleanup entry point
+ */
+
+void prom_init_secondary(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+        void smtc_init_secondary(void);
+       int myvpe;
+
+       /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
+       myvpe = read_c0_tcbind() & TCBIND_CURVPE;
+       if (myvpe != 0) {
+               /* Ideally, this should be done only once per VPE, but... */
+               clear_c0_status(STATUSF_IP2);
+               set_c0_status(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP3
+                               | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6
+                               | STATUSF_IP7);
+       }
+
+        smtc_init_secondary();
+#endif /* CONFIG_MIPS_MT_SMTC */
+}
+
+/*
+ * Platform SMP pre-initialization
+ *
+ * As noted above, we can assume a single CPU for now
+ * but it may be multithreaded.
+ */
+
+void plat_smp_setup(void)
+{
+       if (read_c0_config3() & (1<<2))
+               mipsmt_build_cpu_map(0);
+}
+
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
+       if (read_c0_config3() & (1<<2))
+               mipsmt_prepare_cpus();
+}
+
+/*
+ * SMP initialization finalization entry point
+ */
+
+void prom_smp_finish(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       smtc_smp_finish();
+#endif /* CONFIG_MIPS_MT_SMTC */
+}
+
+/*
+ * Hook for after all CPUs are online
+ */
+
+void prom_cpus_done(void)
+{
+}
+
+#endif /* CONFIG_MIPS_MT_SMP */
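
The read_c0_config3() & (1<<2) test that recurs in this file probes the Config3.MT bit, i.e. whether the core implements the MT ASE at all. A trivial wrapper with a hypothetical name, for illustration only:

	/* Config3 bit 2 advertises the MIPS MT ASE. */
	static inline int core_has_mt_ase(void)
	{
		return (read_c0_config3() & (1 << 2)) != 0;
	}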
diff --git a/arch/mips/mips-boards/sim/cmdline.c b/arch/mips/mips-boards/sim/cmdline.c
deleted file mode 100644 (file)
index fef9fbd..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Kernel command line creation using the prom monitor (YAMON) argc/argv.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(((int *)(int)_prom_argv)[(index)]))
-
-char arcs_cmdline[CL_SIZE];
-
-char * __init prom_getcmdline(void)
-{
-       return &(arcs_cmdline[0]);
-}
-
-
-void  __init prom_init_cmdline(void)
-{
-       char *cp;
-       int actr;
-
-       actr = 1; /* Always ignore argv[0] */
-
-       cp = &(arcs_cmdline[0]);
-       while(actr < prom_argc) {
-               strcpy(cp, prom_argv(actr));
-               cp += strlen(prom_argv(actr));
-               *cp++ = ' ';
-               actr++;
-       }
-       if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */
-               --cp;
-       *cp = '\0';
-}
index 9df37c6fca36ac5815c02d2017ad9566b1515675..c63021a5dc6c8928f216a8c35911ca90f0ec48b9 100644 (file)
@@ -26,8 +26,10 @@ char * __init prom_getcmdline(void)
        return arcs_cmdline;
 }
 
-
 void  __init prom_init_cmdline(void)
 {
-    /* nothing to do */
+       char *cp;
+       cp = arcs_cmdline;
+       /* Get boot line from environment? */
+       *cp = '\0';
 }
index a9f0c2bfe4ad7b4ee63480d6ec4b88b919a80c17..b7084e7c4bf9b7c4bce0ee742301ddfe198e85d5 100644 (file)
@@ -44,8 +44,6 @@
 void core_send_ipi(int cpu, unsigned int action)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-       void smtc_send_ipi(int, int, unsigned int);
-
        smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
 #endif /* CONFIG_MIPS_MT_SMTC */
 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
@@ -59,15 +57,8 @@ void core_send_ipi(int cpu, unsigned int action)
 void __init prom_build_cpu_map(void)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-       extern int mipsmt_build_cpu_map(int startslot);
        int nextslot;
 
-       cpus_clear(phys_cpu_present_map);
-
-       /* Register the boot CPU */
-
-       smp_prepare_boot_cpu();
-
        /*
         * As of November, 2004, MIPSsim only simulates one core
         * at a time.  However, that core may be a MIPS MT core
@@ -87,8 +78,6 @@ void __init prom_build_cpu_map(void)
 void prom_boot_secondary(int cpu, struct task_struct *idle)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-       extern void smtc_boot_secondary(int cpu, struct task_struct *t);
-
        smtc_boot_secondary(cpu, idle);
 #endif /* CONFIG_MIPS_MT_SMTC */
 }
@@ -113,7 +102,6 @@ void prom_init_secondary(void)
 void prom_prepare_cpus(unsigned int max_cpus)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-       void mipsmt_prepare_cpus(int c);
        /*
         * As noted above, we can assume a single CPU for now
         * but it may be multithreaded.
@@ -132,8 +120,6 @@ void prom_prepare_cpus(unsigned int max_cpus)
 void prom_smp_finish(void)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-       void smtc_smp_finish(void);
-
        smtc_smp_finish();
 #endif /* CONFIG_MIPS_MT_SMTC */
 }
index 2d9624fd10ecb4ad61cc33f55901f2109d6a4137..e3a617224868f04643719e68c9020d9941688efc 100644 (file)
@@ -157,7 +157,6 @@ no_context:
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
-
        bust_spinlocks(1);
 
        printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
@@ -188,11 +187,20 @@ do_sigbus:
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
-
+       else
        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
+#if 0
+               printk("do_page_fault() #3: sending SIGBUS to %s for "
+                      "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+                      tsk->comm,
+                      write ? "write access to" : "read access from",
+                      field, address,
+                      field, (unsigned long) regs->cp0_epc,
+                      field, (unsigned long) regs->regs[31]);
+#endif
        tsk->thread.cp0_badvaddr = address;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
@@ -201,7 +209,6 @@ do_sigbus:
        force_sig_info(SIGBUS, &info, tsk);
 
        return;
-
 vmalloc_fault:
        {
                /*
index a865f2394cb0d1de5d44acfdbbb1ee45b83beb28..9dca099ba16b01b907568d9f387f5ea9d693d665 100644 (file)
@@ -32,13 +32,35 @@ extern void build_tlb_refill_handler(void);
                                     "nop; nop; nop; nop; nop; nop;\n\t" \
                                     ".set reorder\n\t")
 
+/* Atomicity and interruptability */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/smtc.h>
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+       { \
+       unsigned int mvpflags; \
+       local_irq_save(flags);\
+       mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+       evpe(mvpflags); \
+       local_irq_restore(flags); \
+       }
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 void local_flush_tlb_all(void)
 {
        unsigned long flags;
        unsigned long old_ctx;
        int entry;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
@@ -57,7 +79,7 @@ void local_flush_tlb_all(void)
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
 }
 
 /* All entries common to a mm share an asid.  To effectively flush
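
ENTER_CRITICAL/EXIT_CRITICAL must be used as a matched pair within one block: on SMTC the ENTER macro opens a brace scope holding the saved MVPControl value that the EXIT macro consumes and closes. A hedged usage sketch with a hypothetical helper (a real caller would also save and restore EntryHi, as the functions below do):

	static int example_tlb_lookup(unsigned long vaddr, unsigned long asid)
	{
		unsigned long flags;
		int idx;

		ENTER_CRITICAL(flags);
		write_c0_entryhi((vaddr & (PAGE_MASK << 1)) | asid);
		mtc0_tlbw_hazard();
		tlb_probe();
		BARRIER;
		idx = read_c0_index();		/* negative when not present */
		EXIT_CRITICAL(flags);

		return idx;
	}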
@@ -87,6 +109,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long flags;
                int size;
 
+               ENTER_CRITICAL(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
                local_irq_save(flags);
@@ -120,7 +143,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                } else {
                        drop_mmu_context(mm, cpu);
                }
-               local_irq_restore(flags);
+               EXIT_CRITICAL(flags);
        }
 }
 
@@ -129,9 +152,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        unsigned long flags;
        int size;
 
+       ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
-       local_irq_save(flags);
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();
 
@@ -162,7 +185,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        } else {
                local_flush_tlb_all();
        }
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
 }
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -175,7 +198,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
-               local_irq_save(flags);
+               ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
@@ -194,7 +217,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
        finish:
                write_c0_entryhi(oldpid);
-               local_irq_restore(flags);
+               EXIT_CRITICAL(flags);
        }
 }
 
@@ -207,7 +230,7 @@ void local_flush_tlb_one(unsigned long page)
        unsigned long flags;
        int oldpid, idx;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
@@ -226,7 +249,7 @@ void local_flush_tlb_one(unsigned long page)
        }
        write_c0_entryhi(oldpid);
 
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
 }
 
 /*
@@ -249,7 +272,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
 
        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
@@ -277,7 +300,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        else
                tlb_write_indexed();
        tlbw_use_hazard();
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
 }
 
 #if 0
@@ -291,7 +314,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
        pte_t *ptep;
        int idx;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        address &= (PAGE_MASK << 1);
        asid = read_c0_entryhi() & ASID_MASK;
        write_c0_entryhi(address | asid);
@@ -310,7 +333,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
        else
                tlb_write_indexed();
        tlbw_use_hazard();
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
 }
 #endif
 
@@ -322,7 +345,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long old_pagemask;
        unsigned long old_ctx;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
@@ -342,7 +365,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        BARRIER;
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
 }
 
 /*
@@ -362,7 +385,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long old_pagemask;
        unsigned long old_ctx;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
@@ -386,10 +409,11 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
 out:
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
        return ret;
 }
 
+extern void __init sanitize_tlb_entries(void);
 static void __init probe_tlb(unsigned long config)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
@@ -402,6 +426,14 @@ static void __init probe_tlb(unsigned long config)
         */
        if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
                return;
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * If TLB is shared in SMTC system, total size already
+        * has been calculated and written into cpu_data tlbsize
+        */
+       if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
+               return;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
        reg = read_c0_config1();
        if (!((config >> 7) & 3))
@@ -410,6 +442,15 @@ static void __init probe_tlb(unsigned long config)
        c->tlbsize = ((reg >> 25) & 0x3f) + 1;
 }
 
+static int __initdata ntlb = 0;
+static int __init set_ntlb(char *str)
+{
+       get_option(&str, &ntlb);
+       return 1;
+}
+
+__setup("ntlb=", set_ntlb);
+
 void __init tlb_init(void)
 {
        unsigned int config = read_c0_config();
@@ -432,5 +473,15 @@ void __init tlb_init(void)
 
        /* Did I tell you that ARC SUCKS?  */
 
+       if (ntlb) {
+               if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+                       int wired = current_cpu_data.tlbsize - ntlb;
+                       write_c0_wired(wired);
+                       write_c0_index(wired-1);
+                       printk ("Restricting TLB to %d entries\n", ntlb);
+               } else
+                       printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+       }
+
        build_tlb_refill_handler();
 }
index c5eea6ae12ca2d0b1643d79cad766ce7d7150467..053dbacac56bdf4d5a797b0d115ddb1951297aa0 100644 (file)
@@ -7,6 +7,16 @@
  *
  * Copyright (C) 2004,2005 by Thiemo Seufer
  * Copyright (C) 2005  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ *
+ * ... and the days got worse and worse and now you see
+ * I've gone completely out of my mind.
+ *
+ * They're coming to take me away haha
+ * they're coming to take me away hoho hihi haha
+ * to the funny farm where code is beautiful all the time ...
+ *
+ * (Condolences to Napoleon XIV)
  */
 
 #include <stdarg.h>
@@ -68,6 +78,7 @@ enum fields
        BIMM = 0x040,
        JIMM = 0x080,
        FUNC = 0x100,
+       SET = 0x200
 };
 
 #define OP_MASK                0x2f
@@ -86,6 +97,8 @@ enum fields
 #define JIMM_SH                0
 #define FUNC_MASK      0x2f
 #define FUNC_SH                0
+#define SET_MASK       0x7
+#define SET_SH         0
 
 enum opcode {
        insn_invalid,
@@ -129,8 +142,8 @@ static __initdata struct insn insn_table[] = {
        { insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
        { insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
        { insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
-       { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD },
-       { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD },
+       { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET},
+       { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET},
        { insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
        { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
        { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
@@ -145,8 +158,8 @@ static __initdata struct insn insn_table[] = {
        { insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
        { insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
        { insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
-       { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD },
-       { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD },
+       { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET},
+       { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET},
        { insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
        { insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
        { insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
@@ -242,6 +255,14 @@ static __init u32 build_func(u32 arg)
        return arg & FUNC_MASK;
 }
 
+static __init u32 build_set(u32 arg)
+{
+       if (arg & ~SET_MASK)
+               printk(KERN_WARNING "TLB synthesizer field overflow\n");
+
+       return arg & SET_MASK;
+}
+
 /*
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
@@ -273,6 +294,7 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...)
        if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
        if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
        if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
+       if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
        va_end(ap);
 
        **buf = op;
@@ -358,8 +380,8 @@ I_u1s2(_bgezl);
 I_u1s2(_bltz);
 I_u1s2(_bltzl);
 I_u1u2s3(_bne);
-I_u1u2(_dmfc0);
-I_u1u2(_dmtc0);
+I_u1u2u3(_dmfc0);
+I_u1u2u3(_dmtc0);
 I_u2u1s3(_daddiu);
 I_u3u1u2(_daddu);
 I_u2u1u3(_dsll);
@@ -376,8 +398,8 @@ I_u2s3u1(_ll);
 I_u2s3u1(_lld);
 I_u1s2(_lui);
 I_u2s3u1(_lw);
-I_u1u2(_mfc0);
-I_u1u2(_mtc0);
+I_u1u2u3(_mfc0);
+I_u1u2u3(_mtc0);
 I_u2u1u3(_ori);
 I_0(_rfe);
 I_u2s3u1(_sc);
@@ -451,8 +473,8 @@ L_LA(_r3000_write_probe_fail)
 # define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
 # define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
 # define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
 # define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
 # define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
 # define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
@@ -464,8 +486,8 @@ L_LA(_r3000_write_probe_fail)
 # define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
 # define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
 # define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
 # define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
 # define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
 # define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
@@ -670,14 +692,15 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
 #define K1             27
 
 /* Some CP0 registers */
-#define C0_INDEX       0
-#define C0_ENTRYLO0    2
-#define C0_ENTRYLO1    3
-#define C0_CONTEXT     4
-#define C0_BADVADDR    8
-#define C0_ENTRYHI     10
-#define C0_EPC         14
-#define C0_XCONTEXT    20
+#define C0_INDEX       0, 0
+#define C0_ENTRYLO0    2, 0
+#define C0_TCBIND      2, 2
+#define C0_ENTRYLO1    3, 0
+#define C0_CONTEXT     4, 0
+#define C0_BADVADDR    8, 0
+#define C0_ENTRYHI     10, 0
+#define C0_EPC         14, 0
+#define C0_XCONTEXT    20, 0
 
 #ifdef CONFIG_64BIT
 # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
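
With the SET field added, the C0_* names above expand to register/select pairs, so the same i_MFC0/i_MTC0 call sites can now reach registers such as TCBind (CP0 register 2, select 2). An illustrative expansion:

	u32 buf[4], *p = buf;		/* illustrative scratch buffer */

	i_MFC0(&p, K1, C0_TCBIND);	/* -> i_mfc0(&p, 27, 2, 2): mfc0 k1, $2, 2 */
	i_MFC0(&p, K1, C0_ENTRYHI);	/* -> select 0, same encoding as before   */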
@@ -951,12 +974,20 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
        /* No i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_SMP
+# ifdef  CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC uses TCBind value as "CPU" index
+        */
+       i_mfc0(p, ptr, C0_TCBIND);
+       i_dsrl(p, ptr, ptr, 19);
+# else
        /*
         * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
         * stored in CONTEXT.
         */
        i_dmfc0(p, ptr, C0_CONTEXT);
        i_dsrl(p, ptr, ptr, 23);
+#endif
        i_LA_mostly(p, tmp, pgdc);
        i_daddu(p, ptr, ptr, tmp);
        i_dmfc0(p, tmp, C0_BADVADDR);
@@ -1014,9 +1045,21 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
        /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
+#ifdef  CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC uses TCBind value as "CPU" index
+        */
+       i_mfc0(p, ptr, C0_TCBIND);
+       i_LA_mostly(p, tmp, pgdc);
+       i_srl(p, ptr, ptr, 19);
+#else
+       /*
+        * smp_processor_id() << 3 is stored in CONTEXT.
+         */
        i_mfc0(p, ptr, C0_CONTEXT);
        i_LA_mostly(p, tmp, pgdc);
        i_srl(p, ptr, ptr, 23);
+#endif
        i_addu(p, ptr, tmp, ptr);
 #else
        i_LA_mostly(p, ptr, pgdc);
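
The srl by 19 above folds two steps into one; a sketch of the arithmetic, assuming the usual TCBind layout with CurTC starting at bit 21 and 4-byte pgd_current[] pointers on 32-bit kernels:

	/*
	 *	offset = (tcbind >> 21) * sizeof(pgd_current[0])
	 *	       = (tcbind >> 21) << 2
	 *	       = tcbind >> 19
	 *
	 * which is exactly the single "srl ptr, ptr, 19" emitted here.
	 */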
index 30b18ea6cb1116fd12e86094788c5917fe9f7e99..f54aa147ec19066ac6aa9f729f72e78f35958153 100644 (file)
 #ifdef CONFIG_64BIT
 #include <asm/asmmacro-64.h>
 #endif
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif
 
+#ifdef CONFIG_MIPS_MT_SMTC
+       .macro  local_irq_enable reg=t0
+       mfc0    \reg, CP0_TCSTATUS
+       ori     \reg, \reg, TCSTATUS_IXMT
+       xori    \reg, \reg, TCSTATUS_IXMT
+       mtc0    \reg, CP0_TCSTATUS
+       ehb
+       .endm
+
+       .macro  local_irq_disable reg=t0
+       mfc0    \reg, CP0_TCSTATUS
+       ori     \reg, \reg, TCSTATUS_IXMT
+       mtc0    \reg, CP0_TCSTATUS
+       ehb
+       .endm
+#else
        .macro  local_irq_enable reg=t0
        mfc0    \reg, CP0_STATUS
        ori     \reg, \reg, 1
@@ -32,6 +51,7 @@
        mtc0    \reg, CP0_STATUS
        irq_disable_hazard
        .endm
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 #ifdef CONFIG_CPU_SB1
        .macro  fpu_enable_hazard
        .endm
 #endif
 
+/*
+ * Temporary until all gas have MT ASE support
+ */
+       .macro  DMT     reg=0
+       .word   (0x41600bc1 | (\reg << 16))
+       .endm
+
+       .macro  EMT     reg=0
+       .word   (0x41600be1 | (\reg << 16))
+       .endm
+
+       .macro  DVPE    reg=0
+       .word   (0x41600001 | (\reg << 16))
+       .endm
+
+       .macro  EVPE    reg=0
+       .word   (0x41600021 | (\reg << 16))
+       .endm
+
+       .macro  MFTR    rt=0, rd=0, u=0, sel=0
+        .word  (0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel))
+       .endm
+
+       .macro  MTTR    rt=0, rd=0, u=0, sel=0
+        .word  (0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel))
+       .endm
+
 #endif /* _ASM_ASMMACRO_H */
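
The .word forms above exist because contemporary binutils may not assemble the MT ASE; the (\reg << 16) term places the operand register number in the rt field (bits 20:16), which is where these instructions return the previous control-register value. A worked encoding example, illustrative only:

	/*
	 *	DVPE 8   ->   .word 0x41600001 | (8 << 16)   ==   0x41680001
	 *
	 * i.e. "dvpe $8": $8 receives the old MVPControl and EVP is cleared.
	 */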
index 140be1c67da7c734d38471df1ebe0ebf80ad9e1c..6572ac703662068da6de5ad81c8b952aec4ee591 100644 (file)
@@ -73,6 +73,16 @@ struct cpuinfo_mips {
        struct cache_desc       dcache; /* Primary D or combined I/D cache */
        struct cache_desc       scache; /* Secondary cache */
        struct cache_desc       tcache; /* Tertiary/split secondary cache */
+#if defined(CONFIG_MIPS_MT_SMTC)
+       /*
+        * In the MIPS MT "SMTC" model, each TC is considered
+        * to be a "CPU" for the purposes of scheduling, but
+        * exception resources, ASID spaces, etc, are common
+        * to all TCs within the same VPE.
+        */
+       int                     vpe_id;  /* Virtual Processor number */
+       int                     tc_id;   /* Thread Context number */
+#endif /* CONFIG_MIPS_MT_SMTC */
        void                    *data;  /* Additional data */
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
index feb29a79388869199d0331fae77273a1d0473873..dadc05188db717c288ab9ecd6a86d483e4731119 100644 (file)
@@ -284,6 +284,8 @@ do {                                                                        \
 #define instruction_hazard() do { } while (0)
 #endif
 
+extern void mips_ihb(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_HAZARDS_H */
index 774348734fa0b38a8fbe916972a966a59302be17..4bb9c06f44107006f4c6330df7f0aedb23bc96a2 100644 (file)
@@ -19,7 +19,12 @@ __asm__ (
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_CPU_MIPSR2
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1       # SMTC - clear TCStatus.IXMT    \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
        "       ei                                                      \n"
 #else
        "       mfc0    $1,$12                                          \n"
@@ -62,7 +67,12 @@ __asm__ (
        "       .macro  local_irq_disable\n"
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_CPU_MIPSR2
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1                                       \n"
+       "       ori     $1, 0x400                                       \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
        "       di                                                      \n"
 #else
        "       mfc0    $1,$12                                          \n"
@@ -88,7 +98,11 @@ __asm__ (
        "       .macro  local_save_flags flags                          \n"
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    \\flags, $2, 1                                  \n"
+#else
        "       mfc0    \\flags, $12                                    \n"
+#endif
        "       .set    pop                                             \n"
        "       .endm                                                   \n");
 
@@ -102,7 +116,13 @@ __asm__ (
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-#ifdef CONFIG_CPU_MIPSR2
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    \\result, $2, 1                                 \n"
+       "       ori     $1, \\result, 0x400                             \n"
+       "       .set    noreorder                                       \n"
+       "       mtc0    $1, $2, 1                                       \n"
+       "       andi    \\result, \\result, 0x400                       \n"
+#elif defined(CONFIG_CPU_MIPSR2)
        "       di      \\result                                        \n"
        "       andi    \\result, 1                                     \n"
 #else
@@ -128,7 +148,14 @@ __asm__ (
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
-#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#ifdef CONFIG_MIPS_MT_SMTC
+       "mfc0   $1, $2, 1                                               \n"
+       "andi   \\flags, 0x400                                          \n"
+       "ori    $1, 0x400                                               \n"
+       "xori   $1, 0x400                                               \n"
+       "or     \\flags, $1                                             \n"
+       "mtc0   \\flags, $2, 1                                          \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
        /*
         * Slow, but doesn't suffer from a relativly unlikely race
         * condition we're having since days 1.
@@ -167,11 +194,29 @@ do {                                                                      \
                : "memory");                                            \
 } while(0)
 
-#define irqs_disabled()                                                        \
-({                                                                     \
-       unsigned long flags;                                            \
-       local_save_flags(flags);                                        \
-       !(flags & 1);                                                   \
-})
+static inline int irqs_disabled(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
+        */
+       unsigned long __result;
+
+       __asm__ __volatile__(
+       "       .set    noreorder                                       \n"
+       "       mfc0    %0, $2, 1                                       \n"
+       "       andi    %0, 0x400                                       \n"
+       "       slt     %0, $0, %0                                      \n"
+       "       .set    reorder                                         \n"
+       : "=r" (__result));
+
+       return __result;
+#else
+       unsigned long flags;
+       local_save_flags(flags);
+
+       return !(flags & 1);
+#endif
+}
 
 #endif /* _ASM_INTERRUPT_H */
index d7aecca3b95f35a6f905ba29cae94074dca817d0..dde677f02bc015e1129d3b30a550f707f3b8f61d 100644 (file)
@@ -11,6 +11,9 @@
 
 #include <linux/config.h>
 #include <linux/linkage.h>
+
+#include <asm/mipsmtregs.h>
+
 #include <irq.h>
 
 #ifdef CONFIG_I8259
@@ -26,6 +29,23 @@ struct pt_regs;
 
 extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs);
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * Clear interrupt mask handling "backstop" if irq_hwmask
+ * entry so indicates. This implies that the ack() or end()
+ * functions will take over re-enabling the low-level mask.
+ * Otherwise it will be done on return from exception.
+ */
+#define __DO_IRQ_SMTC_HOOK()                                           \
+do {                                                                   \
+       if (irq_hwmask[irq] & 0x0000ff00)                               \
+               write_c0_tccontext(read_c0_tccontext() &                \
+                                  ~(irq_hwmask[irq] & 0x0000ff00));    \
+} while (0)
+#else
+#define __DO_IRQ_SMTC_HOOK() do { } while (0)
+#endif
+
 #ifdef CONFIG_PREEMPT
 
 /*
@@ -39,6 +59,7 @@ extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs);
 #define do_IRQ(irq, regs)                                              \
 do {                                                                   \
        irq_enter();                                                    \
+       __DO_IRQ_SMTC_HOOK();                                           \
        __do_IRQ((irq), (regs));                                        \
        irq_exit();                                                     \
 } while (0)
@@ -48,4 +69,12 @@ do {                                                                 \
 extern void arch_init_irq(void);
 extern void spurious_interrupt(struct pt_regs *regs);
 
+#ifdef CONFIG_MIPS_MT_SMTC
+struct irqaction;
+
+extern unsigned long irq_hwmask[];
+extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
+                          unsigned long hwmask);
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 #endif /* _ASM_IRQ_H */
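
setup_irq_smtc() is expected to record the Status.IM (or backstop) mask for the line in irq_hwmask[] before doing an ordinary setup_irq(), so that __DO_IRQ_SMTC_HOOK() above can clear the backstop when the handler's ack/end takes over. One plausible shape, sketched here because the real body (in arch/mips/kernel/smtc.c) is not shown on this page:

	int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			   unsigned long hwmask)
	{
		irq_hwmask[irq] = hwmask;	/* consulted by __DO_IRQ_SMTC_HOOK() */

		return setup_irq(irq, new);
	}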
diff --git a/include/asm-mips/mips_mt.h b/include/asm-mips/mips_mt.h
new file mode 100644 (file)
index 0000000..c31a312
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Definitions and declarations for MIPS MT support
+ * that are common between SMTC, VSMP, and/or AP/SP
+ * kernel models.
+ */
+#ifndef __ASM_MIPS_MT_H
+#define __ASM_MIPS_MT_H
+
+extern cpumask_t mt_fpu_cpumask;
+extern unsigned long mt_fpemul_threshold;
+
+extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
+extern void mips_mt_set_cpuoptions(void);
+
+#endif /* __ASM_MIPS_MT_H */
index a5ac1a62f4f45b1c51be914f8b507ca95ebcae4f..f637ce70758fb363b930f18615726c6205d060d7 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-extern void mips_mt_regdump(void);
+extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
 
 static inline unsigned int dvpe(void)
 {
@@ -282,8 +282,11 @@ static inline void ehb(void)
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
+       "       .set    noat                                    \n"     \
        "       .set    mips32r2                                \n"     \
-       "       mftgpr  %0," #rt "                              \n"     \
+       "       # mftgpr $1," #rt "                             \n"     \
+       "       .word   0x41000820 | (" #rt " << 16)            \n"     \
+       "       move    %0, $1                                  \n"     \
        "       .set    pop                                     \n"     \
        : "=r" (__res));                                                \
                                                                        \
@@ -295,9 +298,7 @@ static inline void ehb(void)
        unsigned long __res;                                            \
                                                                        \
        __asm__ __volatile__(                                           \
-       ".set noat\n\t"                                                 \
-       "mftr\t%0, " #rt ", " #u ", " #sel "\n\t"                       \
-       ".set at\n\t"                                                   \
+       "       mftr    %0, " #rt ", " #u ", " #sel "           \n"     \
        : "=r" (__res));                                                \
                                                                        \
        __res;                                                          \
index e85a42e2ea0cc752f7a1a09fa6011f078397173f..a2ef579f6b1a26d8e87d3b5c19b405629dfe0b29 100644 (file)
@@ -861,7 +861,19 @@ do {                                                                       \
 #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val)
 
 #define read_c0_status()       __read_32bit_c0_register($12, 0)
+#ifdef CONFIG_MIPS_MT_SMTC
+#define write_c0_status(val)                                           \
+do {                                                                   \
+       __write_32bit_c0_register($12, 0, val);                         \
+       __ehb();                                                        \
+} while (0)
+#else
+/*
+ * Legacy non-SMTC code, which may be hazardous
+ * but which might not support EHB
+ */
 #define write_c0_status(val)   __write_32bit_c0_register($12, 0, val)
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 #define read_c0_cause()                __read_32bit_c0_register($13, 0)
 #define write_c0_cause(val)    __write_32bit_c0_register($13, 0, val)
@@ -1004,6 +1016,9 @@ do {                                                                      \
 #define read_c0_taglo()                __read_32bit_c0_register($28, 0)
 #define write_c0_taglo(val)    __write_32bit_c0_register($28, 0, val)
 
+#define read_c0_dtaglo()       __read_32bit_c0_register($28, 2)
+#define write_c0_dtaglo(val)   __write_32bit_c0_register($28, 2, val)
+
 #define read_c0_taghi()                __read_32bit_c0_register($29, 0)
 #define write_c0_taghi(val)    __write_32bit_c0_register($29, 0, val)
 
@@ -1357,6 +1372,11 @@ static inline void tlb_write_random(void)
 /*
  * Manipulate bits in a c0 register.
  */
+#ifndef CONFIG_MIPS_MT_SMTC
+/*
+ * SMTC Linux requires shutting-down microthread scheduling
+ * during CP0 register read-modify-write sequences.
+ */
 #define __BUILD_SET_C0(name)                                   \
 static inline unsigned int                                     \
 set_c0_##name(unsigned int set)                                        \
@@ -1395,6 +1415,119 @@ change_c0_##name(unsigned int change, unsigned int new)         \
        return res;                                             \
 }
 
+#else /* SMTC versions that manage MT scheduling */
+
+#include <asm/interrupt.h>
+
+/*
+ * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
+ * header file recursion.
+ */
+static inline unsigned int __dmt(void)
+{
+       int res;
+
+       __asm__ __volatile__(
+       "       .set    push                                            \n"
+       "       .set    mips32r2                                        \n"
+       "       .set    noat                                            \n"
+       "       .word   0x41610BC1                      # dmt $1        \n"
+       "       ehb                                                     \n"
+       "       move    %0, $1                                          \n"
+       "       .set    pop                                             \n"
+       : "=r" (res));
+
+       instruction_hazard();
+
+       return res;
+}
+
+#define __VPECONTROL_TE_SHIFT  15
+#define __VPECONTROL_TE                (1UL << __VPECONTROL_TE_SHIFT)
+
+#define __EMT_ENABLE           __VPECONTROL_TE
+
+static inline void __emt(unsigned int previous)
+{
+       if ((previous & __EMT_ENABLE))
+               __asm__ __volatile__(
+               "       .set    noreorder                               \n"
+               "       .set    mips32r2                                \n"
+               "       .word   0x41600be1              # emt           \n"
+               "       ehb                                             \n"
+               "       .set    mips0                                   \n"
+               "       .set    reorder                                 \n");
+}
+
+static inline void __ehb(void)
+{
+       __asm__ __volatile__(
+       "       ehb                                                     \n");
+}
+
+/*
+ * Note that local_irq_save/restore affect TC-specific IXMT state,
+ * not Status.IE as in non-SMTC kernel.
+ */
+
+#define __BUILD_SET_C0(name)                                   \
+static inline unsigned int                                     \
+set_c0_##name(unsigned int set)                                        \
+{                                                              \
+       unsigned int res;                                       \
+       unsigned int omt;                                       \
+       unsigned int flags;                                     \
+                                                               \
+       local_irq_save(flags);                                  \
+       omt = __dmt();                                          \
+       res = read_c0_##name();                                 \
+       res |= set;                                             \
+       write_c0_##name(res);                                   \
+       __emt(omt);                                             \
+       local_irq_restore(flags);                               \
+                                                               \
+       return res;                                             \
+}                                                              \
+                                                               \
+static inline unsigned int                                     \
+clear_c0_##name(unsigned int clear)                            \
+{                                                              \
+       unsigned int res;                                       \
+       unsigned int omt;                                       \
+       unsigned int flags;                                     \
+                                                               \
+       local_irq_save(flags);                                  \
+       omt = __dmt();                                          \
+       res = read_c0_##name();                                 \
+       res &= ~clear;                                          \
+       write_c0_##name(res);                                   \
+       __emt(omt);                                             \
+       local_irq_restore(flags);                               \
+                                                               \
+       return res;                                             \
+}                                                              \
+                                                               \
+static inline unsigned int                                     \
+change_c0_##name(unsigned int change, unsigned int new)                \
+{                                                              \
+       unsigned int res;                                       \
+       unsigned int omt;                                       \
+       unsigned long flags;                                    \
+                                                               \
+       local_irq_save(flags);                                  \
+                                                               \
+       omt = __dmt();                                          \
+       res = read_c0_##name();                                 \
+       res &= ~change;                                         \
+       res |= (new & change);                                  \
+       write_c0_##name(res);                                   \
+       __emt(omt);                                             \
+       local_irq_restore(flags);                               \
+                                                               \
+       return res;                                             \
+}
+#endif
+
 __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
index 61cf22588137a3505c3d469c0da618734fbea190..6e09f4c87211017602fe9adba197855294e5dc77 100644 (file)
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#include <asm/smtc.h>
+#endif /* SMTC */
 
 /*
  * For the fast tlb miss handlers, we keep a per cpu array of pointers
@@ -54,6 +58,14 @@ extern unsigned long pgd_current[];
 #define ASID_INC       0x1
 #define ASID_MASK      0xfff
 
+/* SMTC/34K debug hack - but maybe we'll keep it */
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC       0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK      (smtc_asid_mask)
+#define HW_ASID_MASK   0xff
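+/*
+ * smtc_asid_mask may be narrowed at runtime for debug; HW_ASID_MASK
+ * always covers the full hardware ASID field in EntryHi.
+ */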
+/* End SMTC/34K debug hack */
 #else /* FIXME: not correct for R6000 */
 
 #define ASID_INC       0x1
@@ -76,6 +88,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
 #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
 
+#ifndef CONFIG_MIPS_MT_SMTC
+/* Normal, classic MIPS get_new_mmu_context */
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
@@ -91,6 +105,12 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
+#else /* CONFIG_MIPS_MT_SMTC */
+
+#define get_new_mmu_context(mm,cpu) smtc_get_new_mmu_context((mm),(cpu))
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 /*
  * Initialize the context related info for a new mm_struct
  * instance.
@@ -111,14 +131,46 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
-
+#ifdef CONFIG_MIPS_MT_SMTC
+       unsigned long oldasid;
+       unsigned long mtflags;
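+       /* Shared TLB: one live-ASID table (0); otherwise one table per VPE */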
+       int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
        local_irq_save(flags);
+       mtflags = dvpe();
+#else /* Not SMTC */
+       local_irq_save(flags);
+#endif /* CONFIG_MIPS_MT_SMTC */
 
        /* Check if our ASID is of an older version and thus invalid */
        if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
                get_new_mmu_context(next, cpu);
-
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * If the EntryHi ASID being replaced was flagged at ASID
+        * recycling time as having an extended life, clear the bit
+        * marking it as in use by this "CPU"; if that was the last
+        * user, free the ASID for reuse and flush any stale TLB
+        * entries still tagged with it.
+        */
+       oldasid = (read_c0_entryhi() & ASID_MASK);
+       if (smtc_live_asid[mytlb][oldasid]) {
+               smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
+               if (smtc_live_asid[mytlb][oldasid] == 0)
+                       smtc_flush_tlb_asid(oldasid);
+       }
+       /*
+        * Tread softly on EntryHi, and so long as we support
+        * having ASID_MASK smaller than the hardware maximum,
+        * make sure no "soft" bits become "hard"...
+        */
+       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+                       | (cpu_context(cpu, next) & ASID_MASK));
+       ehb(); /* Make sure it propagates to TCStatus */
+       evpe(mtflags);
+#else
        write_c0_entryhi(cpu_context(cpu, next));
+#endif /* CONFIG_MIPS_MT_SMTC */
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
        /*
@@ -151,12 +203,34 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
        unsigned long flags;
        unsigned int cpu = smp_processor_id();
 
+#ifdef CONFIG_MIPS_MT_SMTC
+       unsigned long oldasid;
+       unsigned long mtflags;
+       int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
+#endif /* CONFIG_MIPS_MT_SMTC */
+
        local_irq_save(flags);
 
        /* Unconditionally get a new ASID.  */
        get_new_mmu_context(next, cpu);
 
+#ifdef CONFIG_MIPS_MT_SMTC
+       /* See comments for similar code above */
+       mtflags = dvpe();
+       oldasid = read_c0_entryhi() & ASID_MASK;
+       if (smtc_live_asid[mytlb][oldasid]) {
+               smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
+               if (smtc_live_asid[mytlb][oldasid] == 0)
+                       smtc_flush_tlb_asid(oldasid);
+       }
+       /* See comments for similar code above */
+       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+                        (cpu_context(cpu, next) & ASID_MASK));
+       ehb(); /* Make sure it propagates to TCStatus */
+       evpe(mtflags);
+#else
        write_c0_entryhi(cpu_context(cpu, next));
+#endif /* CONFIG_MIPS_MT_SMTC */
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
        /* mark mmu ownership change */
@@ -174,17 +248,49 @@ static inline void
 drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 {
        unsigned long flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+       unsigned long oldasid;
+       /* Can't use spinlock because called from TLB flush within DVPE */
+       unsigned int prevvpe;
+       int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
        local_irq_save(flags);
 
        if (cpu_isset(cpu, mm->cpu_vm_mask))  {
                get_new_mmu_context(mm, cpu);
+#ifdef CONFIG_MIPS_MT_SMTC
+               /* See comments for similar code above */
+               prevvpe = dvpe();
+               oldasid = (read_c0_entryhi() & ASID_MASK);
+               if (smtc_live_asid[mytlb][oldasid]) {
+                       smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
+                       if (smtc_live_asid[mytlb][oldasid] == 0)
+                               smtc_flush_tlb_asid(oldasid);
+               }
+               /* See comments for similar code above */
+               write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+                               | cpu_asid(cpu, mm));
+               ehb(); /* Make sure it propagates to TCStatus */
+               evpe(prevvpe);
+#else /* not CONFIG_MIPS_MT_SMTC */
                write_c0_entryhi(cpu_asid(cpu, mm));
+#endif /* CONFIG_MIPS_MT_SMTC */
        } else {
                /* will get a new context next time */
+#ifndef CONFIG_MIPS_MT_SMTC
                cpu_context(cpu, mm) = 0;
+#else /* SMTC */
+               int i;
+
+               /* SMTC shares the TLB (and ASIDs) across VPEs */
+               for (i = 0; i < num_online_cpus(); i++) {
+                       if ((smtc_status & SMTC_TLB_SHARED) ||
+                           (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
+                               cpu_context(i, mm) = 0;
+               }
+#endif /* CONFIG_MIPS_MT_SMTC */
        }
-
        local_irq_restore(flags);
 }
 
index 39d2bd50fecede26d52090657e08b5fe71e29f39..786651340de18d540d9603fbdcac59e16403010e 100644 (file)
@@ -12,6 +12,7 @@
 #define _ASM_PROCESSOR_H
 
 #include <linux/config.h>
+#include <linux/cpumask.h>
 #include <linux/threads.h>
 
 #include <asm/cachectl.h>
@@ -107,6 +108,10 @@ struct mips_dsp_state {
 
 #define INIT_DSP {{0,},}
 
+#define INIT_CPUMASK { \
+       {0,} \
+}
+
 typedef struct {
        unsigned long seg;
 } mm_segment_t;
@@ -142,6 +147,7 @@ struct thread_struct {
 #define MF_LOGADE      2               /* Log address errors to syslog */
 #define MF_32BIT_REGS  4               /* also implies 16/32 fprs */
 #define MF_32BIT_ADDR  8               /* 32-bit address space (o32/n32) */
+#define MF_FPUBOUND    0x10            /* thread bound to FPU-full CPU set */
        unsigned long mflags;
        unsigned long irix_trampoline;  /* Wheee... */
        unsigned long irix_oldctx;
index 95c5839ac4657dac29ebc30c03d05774ea677c8b..fa9d8713c12a2615ac0e1c89ff3b22f9be683797 100644 (file)
@@ -45,6 +45,10 @@ struct pt_regs {
        unsigned long cp0_badvaddr;
        unsigned long cp0_cause;
        unsigned long cp0_epc;
+#ifdef CONFIG_MIPS_MT_SMTC
+       unsigned long cp0_tcstatus;
+       unsigned long smtc_pad;
+#endif /* CONFIG_MIPS_MT_SMTC */
 };
 
 /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
index 2f2eb95387f6a476520c5ec95a0dbfc8c6eadc87..3c8e3c8d1a9a5da1a3aab928ceb3bc92a4685cf8 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 #include <asm/cpu-features.h>
+#include <asm/mipsmtregs.h>
 
 /*
  * This macro returns a properly sign-extended address suitable as base address
        :                                                               \
        : "i" (op), "R" (*(unsigned char *)(addr)))
 
+#ifdef CONFIG_MIPS_MT
+/*
+ * Temporary hacks for SMTC debug. Optionally force single-threaded
+ * execution during cache flushes.
+ */
+
+#define PROTECT_CACHE_FLUSHES 1
+
+#ifdef PROTECT_CACHE_FLUSHES
+
+extern int mt_protiflush;
+extern int mt_protdflush;
+extern void mt_cflush_lockdown(void);
+extern void mt_cflush_release(void);
+
+#define BEGIN_MT_IPROT \
+       unsigned long flags = 0;                        \
+       unsigned long mtflags = 0;                      \
+       if (mt_protiflush) {                            \
+               local_irq_save(flags);                  \
+               ehb();                                  \
+               mtflags = dvpe();                       \
+               mt_cflush_lockdown();                   \
+       }
+
+#define END_MT_IPROT \
+       if (mt_protiflush) {                            \
+               mt_cflush_release();                    \
+               evpe(mtflags);                          \
+               local_irq_restore(flags);               \
+       }
+
+#define BEGIN_MT_DPROT \
+       unsigned long flags = 0;                        \
+       unsigned long mtflags = 0;                      \
+       if (mt_protdflush) {                            \
+               local_irq_save(flags);                  \
+               ehb();                                  \
+               mtflags = dvpe();                       \
+               mt_cflush_lockdown();                   \
+       }
+
+#define END_MT_DPROT \
+       if (mt_protdflush) {                            \
+               mt_cflush_release();                    \
+               evpe(mtflags);                          \
+               local_irq_restore(flags);               \
+       }
+
+#else
+
+#define BEGIN_MT_IPROT
+#define BEGIN_MT_DPROT
+#define END_MT_IPROT
+#define END_MT_DPROT
+
+#endif /* PROTECT_CACHE_FLUSHES */
+
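+/*
+ * The prologue/epilogue pairs below repeat each flush operation
+ * mt_n_iflushes or mt_n_dflushes times and, when the protection
+ * hacks above are enabled, force single-threaded execution for
+ * the duration of the flush.
+ */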
+#define __iflush_prologue                                              \
+       unsigned long redundance;                                       \
+       extern int mt_n_iflushes;                                       \
+       BEGIN_MT_IPROT                                                  \
+       for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
+
+#define __iflush_epilogue                                              \
+       END_MT_IPROT                                                    \
+       }
+
+#define __dflush_prologue                                              \
+       unsigned long redundance;                                       \
+       extern int mt_n_dflushes;                                       \
+       BEGIN_MT_DPROT                                                  \
+       for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
+
+#define __dflush_epilogue \
+       END_MT_DPROT     \
+       }
+
+#define __inv_dflush_prologue __dflush_prologue
+#define __inv_dflush_epilogue __dflush_epilogue
+#define __sflush_prologue {
+#define __sflush_epilogue }
+#define __inv_sflush_prologue __sflush_prologue
+#define __inv_sflush_epilogue __sflush_epilogue
+
+#else /* CONFIG_MIPS_MT */
+
+#define __iflush_prologue {
+#define __iflush_epilogue }
+#define __dflush_prologue {
+#define __dflush_epilogue }
+#define __inv_dflush_prologue {
+#define __inv_dflush_epilogue }
+#define __sflush_prologue {
+#define __sflush_epilogue }
+#define __inv_sflush_prologue {
+#define __inv_sflush_epilogue }
+
+#endif /* CONFIG_MIPS_MT */
+
 static inline void flush_icache_line_indexed(unsigned long addr)
 {
+       __iflush_prologue
        cache_op(Index_Invalidate_I, addr);
+       __iflush_epilogue
 }
 
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
+       __dflush_prologue
        cache_op(Index_Writeback_Inv_D, addr);
+       __dflush_epilogue
 }
 
 static inline void flush_scache_line_indexed(unsigned long addr)
@@ -56,17 +161,23 @@ static inline void flush_scache_line_indexed(unsigned long addr)
 
 static inline void flush_icache_line(unsigned long addr)
 {
+       __iflush_prologue
        cache_op(Hit_Invalidate_I, addr);
+       __iflush_epilogue
 }
 
 static inline void flush_dcache_line(unsigned long addr)
 {
+       __dflush_prologue
        cache_op(Hit_Writeback_Inv_D, addr);
+       __dflush_epilogue
 }
 
 static inline void invalidate_dcache_line(unsigned long addr)
 {
+       __dflush_prologue
        cache_op(Hit_Invalidate_D, addr);
+       __dflush_epilogue
 }
 
 static inline void invalidate_scache_line(unsigned long addr)
@@ -239,9 +350,13 @@ static inline void blast_##pfx##cache##lsize(void)                 \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
+       __##pfx##flush_prologue                                         \
+                                                                       \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws,indexop);       \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
 }                                                                      \
                                                                        \
 static inline void blast_##pfx##cache##lsize##_page(unsigned long page)        \
@@ -249,10 +364,14 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page)   \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
+       __##pfx##flush_prologue                                         \
+                                                                       \
        do {                                                            \
                cache##lsize##_unroll32(start,hitop);                   \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
 }                                                                      \
                                                                        \
 static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
@@ -265,9 +384,13 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
+       __##pfx##flush_prologue                                         \
+                                                                       \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws,indexop);       \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
 }
 
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
@@ -288,12 +411,17 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
+                                                                       \
+       __##pfx##flush_prologue                                         \
+                                                                       \
        while (1) {                                                     \
                prot##cache_op(hitop, addr);                            \
                if (addr == aend)                                       \
                        break;                                          \
                addr += lsize;                                          \
        }                                                               \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
 }
 
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
new file mode 100644 (file)
index 0000000..e1941d1
--- /dev/null
@@ -0,0 +1,55 @@
+#ifndef _ASM_SMTC_MT_H
+#define _ASM_SMTC_MT_H
+
+/*
+ * Definitions for SMTC multitasking on MIPS MT cores
+ */
+
+#include <asm/mips_mt.h>
+
+/*
+ * System-wide SMTC status information
+ */
+
+extern unsigned int smtc_status;
+
+#define SMTC_TLB_SHARED        0x00000001
+#define SMTC_MTC_ACTIVE        0x00000002
+
+/*
+ * TLB/ASID Management information
+ */
+
+#define MAX_SMTC_TLBS 2
+#define MAX_SMTC_ASIDS 256
+#if NR_CPUS <= 8
+typedef char asiduse;
+#elif NR_CPUS <= 16
+typedef short asiduse;
+#else
+typedef long asiduse;
+#endif
+
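+/* Per-TLB, per-ASID bitmask of the "CPUs" (TCs) currently using that ASID */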
+extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
+
+extern void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
+extern void smtc_flush_tlb_asid(unsigned long asid);
+extern int mipsmt_build_cpu_map(int startslot);
+extern void mipsmt_prepare_cpus(void);
+extern void smtc_smp_finish(void);
+extern void smtc_boot_secondary(int cpu, struct task_struct *t);
+
+/*
+ * Sharing the TLB between multiple VPEs means that the
+ * "random" index selection function is not allowed to
+ * select the current value of the Index register. To
+ * avoid additional TLB pressure, the Index registers
+ * are "parked" with a non-Valid value.
+ */
+
+#define PARKED_INDEX   ((unsigned int)0x80000000)
+
+#endif /*  _ASM_SMTC_MT_H */
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
new file mode 100644 (file)
index 0000000..f22c3e2
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
+ */
+#ifndef __ASM_SMTC_IPI_H
+#define __ASM_SMTC_IPI_H
+
+//#define SMTC_IPI_DEBUG
+
+#ifdef SMTC_IPI_DEBUG
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#endif /* SMTC_IPI_DEBUG */
+
+/*
+ * An IPI "message"
+ */
+
+struct smtc_ipi {
+       struct smtc_ipi *flink;
+       int type;
+       void *arg;
+       int dest;
+#ifdef SMTC_IPI_DEBUG
+       int sender;
+       long stamp;
+#endif /* SMTC_IPI_DEBUG */
+};
+
+/*
+ * Defined IPI Types
+ */
+
+#define LINUX_SMP_IPI 1
+#define SMTC_CLOCK_TICK 2
+
+/*
+ * A queue of IPI messages
+ */
+
+struct smtc_ipi_q {
+       struct smtc_ipi *head;
+       spinlock_t lock;
+       struct smtc_ipi *tail;
+       int depth;
+};
+
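+/*
+ * Per-CPU queues of pending IPI messages, plus a queue from which
+ * free message buffers are recycled.
+ */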
+extern struct smtc_ipi_q IPIQ[NR_CPUS];
+extern struct smtc_ipi_q freeIPIq;
+
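+/* Append a message to the tail of an IPI queue */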
+static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       if (q->head == NULL)
+               q->head = q->tail = p;
+       else
+               q->tail->flink = p;
+       p->flink = NULL;
+       q->tail = p;
+       q->depth++;
+#ifdef SMTC_IPI_DEBUG
+       p->sender = read_c0_tcbind();
+       p->stamp = read_c0_count();
+#endif /* SMTC_IPI_DEBUG */
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+
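+/* Detach and return the message at the head of an IPI queue, or NULL if empty */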
+static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+{
+       struct smtc_ipi *p;
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       if (q->head == NULL)
+               p = NULL;
+       else {
+               p = q->head;
+               q->head = q->head->flink;
+               q->depth--;
+               /* Arguably unnecessary, but leaves queue cleaner */
+               if (q->head == NULL)
+                       q->tail = NULL;
+       }
+       spin_unlock_irqrestore(&q->lock, flags);
+       return p;
+}
+
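+/* Push a message back onto the head of an IPI queue */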
+static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       if (q->head == NULL) {
+               q->head = q->tail = p;
+               p->flink = NULL;
+       } else {
+               p->flink = q->head;
+               q->head = p;
+       }
+       q->depth++;
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
+{
+       unsigned long flags;
+       int retval;
+
+       spin_lock_irqsave(&q->lock, flags);
+       retval = q->depth;
+       spin_unlock_irqrestore(&q->lock, flags);
+       return retval;
+}
+
+extern void smtc_send_ipi(int cpu, int type, unsigned int action);
+
+#endif /* __ASM_SMTC_IPI_H */
diff --git a/include/asm-mips/smtc_proc.h b/include/asm-mips/smtc_proc.h
new file mode 100644 (file)
index 0000000..25da651
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Definitions for SMTC /proc entries
+ * Copyright (C) 2005 MIPS Technologies Inc.
+ */
+#ifndef __ASM_SMTC_PROC_H
+#define __ASM_SMTC_PROC_H
+
+/*
+ * per-"CPU" statistics
+ */
+
+struct smtc_cpu_proc {
+       unsigned long timerints;
+       unsigned long selfipis;
+};
+
+extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
+
+/* Number of recoveries of "stolen" FPU access rights on the 34K */
+
+extern atomic_t smtc_fpu_recoveries;
+
+#endif /* __ASM_SMTC_PROC_H */
index 2acf3e844f0001951dcad3ed848eccdde9cba824..c4856a874965f3ad851808c9ec7886fd45bcd2a7 100644 (file)
 #include <linux/threads.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/mipsregs.h>
 #include <asm/asm-offsets.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif /* CONFIG_MIPS_MT_SMTC */
+
                .macro  SAVE_AT
                .set    push
                .set    noat
 #ifdef CONFIG_SMP
                .macro  get_saved_sp    /* SMP variation */
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_MIPS_MT_SMTC
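+               /*
+                * SMTC: index kernelsp[] by TC number (from TCBind)
+                * rather than by CPU number (from the Context register).
+                */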
+               .set    mips32
+               mfc0    k0, CP0_TCBIND
+               .set    mips0
+               lui     k1, %hi(kernelsp)
+               srl     k0, k0, 19
+               /* No need to shift down and up to clear bits 0-1 */
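+               /*
+                * TCBind.CurTC lives in bits 28:21, so the shift by 19
+                * leaves the TC number already scaled by 4, i.e. the
+                * byte offset into the 32-bit kernelsp[] array.
+                */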
+#else
                mfc0    k0, CP0_CONTEXT
                lui     k1, %hi(kernelsp)
                srl     k0, k0, 23
+#endif
                addu    k1, k0
                LONG_L  k1, %lo(kernelsp)(k1)
 #endif
 #ifdef CONFIG_64BIT
+#ifdef CONFIG_MIPS_MT_SMTC
+               .set    mips64
+               mfc0    k1, CP0_TCBIND
+               .set    mips0
+               lui     k0, %highest(kernelsp)
+               dsrl    k1, 19
+               /* No need to shift down and up to clear bits 0-2 */
+#else
                MFC0    k1, CP0_CONTEXT
                lui     k0, %highest(kernelsp)
                dsrl    k1, 23
                dsll    k0, k0, 16
                daddiu  k0, %hi(kernelsp)
                dsll    k0, k0, 16
+#endif /* CONFIG_MIPS_MT_SMTC */
                daddu   k1, k1, k0
                LONG_L  k1, %lo(kernelsp)(k1)
-#endif
+#endif /* CONFIG_64BIT */
                .endm
 
                .macro  set_saved_sp stackp temp temp2
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_MIPS_MT_SMTC
+               mfc0    \temp, CP0_TCBIND
+               srl     \temp, 19
+#else
                mfc0    \temp, CP0_CONTEXT
                srl     \temp, 23
 #endif
+#endif
 #ifdef CONFIG_64BIT
+#ifdef CONFIG_MIPS_MT_SMTC
+               mfc0    \temp, CP0_TCBIND
+               dsrl    \temp, 19
+#else
                MFC0    \temp, CP0_CONTEXT
                dsrl    \temp, 23
+#endif
 #endif
                LONG_S  \stackp, kernelsp(\temp)
                .endm
                PTR_SUBU sp, k1, PT_SIZE
                LONG_S  k0, PT_R29(sp)
                LONG_S  $3, PT_R3(sp)
+               /*
+                * You might think that you don't need to save $0,
+                * but the FPU emulator and gdb remote debug stub
+                * need it to operate correctly
+                */
                LONG_S  $0, PT_R0(sp)
                mfc0    v1, CP0_STATUS
                LONG_S  $2, PT_R2(sp)
                LONG_S  v1, PT_STATUS(sp)
+#ifdef CONFIG_MIPS_MT_SMTC
+               /*
+                * Ideally, these instructions would be shuffled in
+                * to cover the pipeline delay.
+                */
+               .set    mips32
+               mfc0    v1, CP0_TCSTATUS
+               .set    mips0
+               LONG_S  v1, PT_TCSTATUS(sp)
+#endif /* CONFIG_MIPS_MT_SMTC */
                LONG_S  $4, PT_R4(sp)
                mfc0    v1, CP0_CAUSE
                LONG_S  $5, PT_R5(sp)
                .endm
 
 #else
+/*
+ * For an SMTC kernel the global Status.IE bit is left set and interrupts
+ * are masked per-TC via TCStatus.IXMT, so the status mask below excludes
+ * bit 0 (IE).
+ */
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#define STATMASK 0x1e
+#else
+#define STATMASK 0x1f
+#endif
                .macro  RESTORE_SOME
                .set    push
                .set    reorder
                .set    noat
+#ifdef CONFIG_MIPS_MT_SMTC
+               .set    mips32r2
+               /*
+                * This may not really be necessary if ints are already
+                * inhibited here.
+                */
+               mfc0    v0, CP0_TCSTATUS
+               ori     v0, TCSTATUS_IXMT
+               mtc0    v0, CP0_TCSTATUS
+               ehb
+               DMT     5                               # dmt a1
+               jal     mips_ihb
+#endif /* CONFIG_MIPS_MT_SMTC */
                mfc0    a0, CP0_STATUS
-               ori     a0, 0x1f
-               xori    a0, 0x1f
+               ori     a0, STATMASK
+               xori    a0, STATMASK
                mtc0    a0, CP0_STATUS
                li      v1, 0xff00
                and     a0, v1
                and     v0, v1
                or      v0, a0
                mtc0    v0, CP0_STATUS
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * Only after EXL/ERL have been restored to Status can we
+ * restore TCStatus.IXMT.
+ */
+               LONG_L  v1, PT_TCSTATUS(sp)
+               ehb
+               mfc0    v0, CP0_TCSTATUS
+               andi    v1, TCSTATUS_IXMT
+               /* We know that TCStatus.IXMT should be set from above */
+               xori    v0, v0, TCSTATUS_IXMT
+               or      v0, v0, v1
+               mtc0    v0, CP0_TCSTATUS
+               ehb
+               andi    a1, a1, VPECONTROL_TE
+               beqz    a1, 1f
+               emt
+1:
+               .set    mips0
+#endif /* CONFIG_MIPS_MT_SMTC */
                LONG_L  v1, PT_EPC(sp)
                MTC0    v1, CP0_EPC
                LONG_L  $31, PT_R31(sp)
  * Set cp0 enable bit as sign that we're running on the kernel stack
  */
                .macro  CLI
+#if !defined(CONFIG_MIPS_MT_SMTC)
                mfc0    t0, CP0_STATUS
                li      t1, ST0_CU0 | 0x1f
                or      t0, t1
                xori    t0, 0x1f
                mtc0    t0, CP0_STATUS
+#else /* CONFIG_MIPS_MT_SMTC */
+               /*
+                * For SMTC, we need to set privilege
+                * and disable interrupts only for the
+                * current TC, using the TCStatus register.
+                */
+               mfc0    t0, CP0_TCSTATUS
+               /* Fortunately CU 0 is in the same place in both registers */
+               /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
+               li      t1, ST0_CU0 | 0x08001c00
+               or      t0, t1
+               /* Clear TKSU, leave IXMT */
+               xori    t0, 0x00001800
+               mtc0    t0, CP0_TCSTATUS
+               ehb
+               /* We need to leave the global IE bit set, but clear EXL and ERL */
+               mfc0    t0, CP0_STATUS
+               ori     t0, ST0_EXL | ST0_ERL
+               xori    t0, ST0_EXL | ST0_ERL
+               mtc0    t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
                irq_disable_hazard
                .endm
 
  * Set cp0 enable bit as sign that we're running on the kernel stack
  */
                .macro  STI
+#if !defined(CONFIG_MIPS_MT_SMTC)
                mfc0    t0, CP0_STATUS
                li      t1, ST0_CU0 | 0x1f
                or      t0, t1
                xori    t0, 0x1e
                mtc0    t0, CP0_STATUS
+#else /* CONFIG_MIPS_MT_SMTC */
+               /*
+                * For SMTC, we need to set privilege
+                * and enable interrupts only for the
+                * current TC, using the TCStatus register.
+                */
+               ehb
+               mfc0    t0, CP0_TCSTATUS
+               /* Fortunately CU 0 is in the same place in both registers */
+               /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
+               li      t1, ST0_CU0 | 0x08001c00
+               or      t0, t1
+               /* Clear TKSU *and* IXMT */
+               xori    t0, 0x00001c00
+               mtc0    t0, CP0_TCSTATUS
+               ehb
+               /* We need to leave the global IE bit set, but clear EXL...*/
+               mfc0    t0, CP0_STATUS
+               ori     t0, ST0_EXL
+               xori    t0, ST0_EXL
+               mtc0    t0, CP0_STATUS
+               /* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
+#endif /* CONFIG_MIPS_MT_SMTC */
                irq_enable_hazard
                .endm
 
  * Set cp0 enable bit as sign that we're running on the kernel stack
  */
                .macro  KMODE
+#ifdef CONFIG_MIPS_MT_SMTC
+               /*
+                * This gets baroque in SMTC.  We want to
+                * protect the non-atomic clearing of EXL
+                * with DMT/EMT, but we don't want to take
+                * an interrupt while DMT is still in effect.
+                */
+
+               /* KMODE gets invoked from both reorder and noreorder code */
+               .set    push
+               .set    mips32r2
+               .set    noreorder
+               mfc0    v0, CP0_TCSTATUS
+               andi    v1, v0, TCSTATUS_IXMT
+               ori     v0, TCSTATUS_IXMT
+               mtc0    v0, CP0_TCSTATUS
+               ehb
+               DMT     2                               # dmt   v0
+               /*
+                * We don't know a priori if ra is "live"
+                */
+               move    t0, ra
+               jal     mips_ihb
+               nop     /* delay slot */
+               move    ra, t0
+#endif /* CONFIG_MIPS_MT_SMTC */
                mfc0    t0, CP0_STATUS
                li      t1, ST0_CU0 | 0x1e
                or      t0, t1
                xori    t0, 0x1e
                mtc0    t0, CP0_STATUS
+#ifdef CONFIG_MIPS_MT_SMTC
+               ehb
+               andi    v0, v0, VPECONTROL_TE
+               beqz    v0, 2f
+               nop     /* delay slot */
+               emt
+2:
+               mfc0    v0, CP0_TCSTATUS
+               /* Clear IXMT, then OR in previous value */
+               ori     v0, TCSTATUS_IXMT
+               xori    v0, TCSTATUS_IXMT
+               or      v0, v1, v0
+               mtc0    v0, CP0_TCSTATUS
+               /*
+                * irq_disable_hazard below should expand to EHB
+                * on 24K/34K CPUs
+                */
+               .set    pop
+#endif /* CONFIG_MIPS_MT_SMTC */
                irq_disable_hazard
                .endm