#define EXT_INTC1_REQ1 5 /* IP 5 */
#define MIPS_TIMER_IP 7 /* IP 7 */
-extern void mips_timer_interrupt(void);
-
void (*board_init_irq)(void);
static DEFINE_SPINLOCK(irq_lock);
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & CAUSEF_IP7)
- mips_timer_interrupt();
+ ll_timer_interrupt(63, NULL);
else if (pending & CAUSEF_IP2)
intc0_req0_irqdispatch();
else if (pending & CAUSEF_IP3)
static DEFINE_SPINLOCK(time_lock);
-static inline void ack_r4ktimer(unsigned long newval)
-{
- write_c0_compare(newval);
-}
-
-/*
- * There are a lot of conceptually broken versions of the MIPS timer interrupt
- * handler floating around. This one is rather different, but the algorithm
- * is provably more robust.
- */
unsigned long wtimer;
-void mips_timer_interrupt(void)
-{
- int irq = 63;
-
- irq_enter();
- kstat_this_cpu.irqs[irq]++;
-
- if (r4k_offset == 0)
- goto null;
-
- do {
- kstat_this_cpu.irqs[irq]++;
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
- r4k_cur += r4k_offset;
- ack_r4ktimer(r4k_cur);
-
- } while (((unsigned long)read_c0_count()
- - r4k_cur) < 0x7fffffff);
-
- irq_exit();
- return;
-
-null:
- ack_r4ktimer(0);
- irq_exit();
-}
-
#ifdef CONFIG_PM
irqreturn_t counter0_irq(int irq, void *dev_id)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
- local_timer_interrupt(0, NULL);
+ local_timer_interrupt(0, NULL);
irq_exit();
break;
case LINUX_SMP_IPI:
* High-level timer interrupt service routines. This function
* is set as irqaction->handler and is invoked through do_IRQ.
*/
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
write_seqlock(&xtime_lock);
return 0;
}
+EXPORT_SYMBOL(null_perf_irq);
+
int (*perf_irq)(void) = null_perf_irq;
-EXPORT_SYMBOL(null_perf_irq);
EXPORT_SYMBOL(perf_irq);
/*
!r2;
}
-asmlinkage void ll_timer_interrupt(int irq)
+void ll_timer_interrupt(int irq, void *dev_id)
{
- int r2 = cpu_has_mips_r2;
+ int cpu = smp_processor_id();
- irq_enter();
- kstat_this_cpu.irqs[irq]++;
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * In an SMTC system, one Count/Compare set exists per VPE.
+ * Which TC within a VPE gets the interrupt is essentially
+ * random - we only know that it shouldn't be one with
+ * IXMT set. Whichever TC gets the interrupt needs to
+ * send special interprocessor interrupts to the other
+ * TCs to make sure that they schedule, etc.
+ *
+ * That code is specific to the SMTC kernel, not to
+ * a particular platform, so it's invoked from
+ * the general MIPS timer_interrupt routine.
+ */
+
+ /*
+ * We could be here due to timer interrupt,
+ * perf counter overflow, or both.
+ */
+ (void) handle_perf_irq(1);
+
+ if (read_c0_cause() & (1 << 30)) {
+ /*
+ * There are things we only want to do once per tick
+ * in an "MP" system. One TC of each VPE will take
+ * the actual timer interrupt. The others will get
+ * timer broadcast IPIs. We use whoever it is that takes
+ * the tick on VPE 0 to run the full timer_interrupt().
+ */
+ if (cpu_data[cpu].vpe_id == 0) {
+ timer_interrupt(irq, NULL);
+ } else {
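+ /*
+ * TCs on VPEs other than VPE 0 just rearm Compare for the
+ * next tick, one timer period (mips_hpt_frequency / HZ
+ * Count cycles) from now, and do the per-CPU profiling
+ * and process accounting.
+ */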
+ write_c0_compare(read_c0_count() +
+ (mips_hpt_frequency/HZ));
+ local_timer_interrupt(irq, dev_id);
+ }
+ smtc_timer_broadcast(cpu_data[cpu].vpe_id);
+ }
+#else /* CONFIG_MIPS_MT_SMTC */
+ int r2 = cpu_has_mips_r2;
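+
+ /*
+ * If the (possibly shared) perf counter interrupt was handled
+ * and, lacking R2's Cause.TI bit, we cannot tell whether a
+ * timer interrupt also occurred, or if this is an R2 core and
+ * Cause.TI shows no timer interrupt pending, there is nothing
+ * more to do here.
+ */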
if (handle_perf_irq(r2))
- goto out;
+ return;
if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
- goto out;
+ return;
-
- timer_interrupt(irq, NULL);
-
-out:
- irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
- irq_enter();
- if (smp_processor_id() != 0)
- kstat_this_cpu.irqs[irq]++;
-
- /* we keep interrupt disabled all the time */
- local_timer_interrupt(irq, NULL);
- irq_exit();
+ if (cpu == 0) {
+ /*
+ * CPU 0 handles the global timer interrupt work and process
+ * accounting, and resets the count/compare registers to trigger
+ * the next timer interrupt.
+ */
+ timer_interrupt(irq, NULL);
+ } else {
+ /*
+ * Everyone else needs to reset the timer interrupt here,
+ * since local_timer_interrupt() does not do it.
+ */
+ /*
+ * FIXME: need to cope with counter underflow.
+ * More support needs to be added to kernel/time for
+ * counter/timer interrupts on multiple CPU's
+ */
+ write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
+
+ /*
+ * Other CPUs should do profiling and process accounting
+ */
+ local_timer_interrupt(irq, dev_id);
+ }
+#endif /* CONFIG_MIPS_MT_SMTC */
}
/*
do_IRQ(cp0_perfcount_irq);
}
-/*
- * Redeclare until I get around mopping the timer code insanity on MIPS.
- */
-extern int null_perf_irq(void);
-
-extern int (*perf_irq)(void);
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq (int r2)
-{
- /*
- * The performance counter overflow interrupt may be shared with the
- * timer interrupt (cp0_perfcount_irq < 0). If it is and a
- * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
- * and we can't reliably determine if a counter interrupt has also
- * happened (!r2) then don't check for a timer interrupt.
- */
- return (cp0_perfcount_irq < 0) &&
- perf_irq() == IRQ_HANDLED &&
- !r2;
-}
-
-irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
-{
- int cpu = smp_processor_id();
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * In an SMTC system, one Count/Compare set exists per VPE.
- * Which TC within a VPE gets the interrupt is essentially
- * random - we only know that it shouldn't be one with
- * IXMT set. Whichever TC gets the interrupt needs to
- * send special interprocessor interrupts to the other
- * TCs to make sure that they schedule, etc.
- *
- * That code is specific to the SMTC kernel, not to
- * the a particular platform, so it's invoked from
- * the general MIPS timer_interrupt routine.
- */
-
- /*
- * We could be here due to timer interrupt,
- * perf counter overflow, or both.
- */
- (void) handle_perf_irq(1);
-
- if (read_c0_cause() & (1 << 30)) {
- /*
- * There are things we only want to do once per tick
- * in an "MP" system. One TC of each VPE will take
- * the actual timer interrupt. The others will get
- * timer broadcast IPIs. We use whoever it is that takes
- * the tick on VPE 0 to run the full timer_interrupt().
- */
- if (cpu_data[cpu].vpe_id == 0) {
- timer_interrupt(irq, NULL);
- } else {
- write_c0_compare(read_c0_count() +
- (mips_hpt_frequency/HZ));
- local_timer_interrupt(irq, dev_id);
- }
- smtc_timer_broadcast();
- }
-#else /* CONFIG_MIPS_MT_SMTC */
- int r2 = cpu_has_mips_r2;
-
- if (handle_perf_irq(r2))
- goto out;
-
- if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
- goto out;
-
- if (cpu == 0) {
- /*
- * CPU 0 handles the global timer interrupt job and process
- * accounting resets count/compare registers to trigger next
- * timer int.
- */
- timer_interrupt(irq, NULL);
- } else {
- /* Everyone else needs to reset the timer int here as
- ll_local_timer_interrupt doesn't */
- /*
- * FIXME: need to cope with counter underflow.
- * More support needs to be added to kernel/time for
- * counter/timer interrupts on multiple CPU's
- */
- write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
-
- /*
- * Other CPUs should do profiling and process accounting
- */
- local_timer_interrupt(irq, dev_id);
- }
-out:
-#endif /* CONFIG_MIPS_MT_SMTC */
- return IRQ_HANDLED;
-}
-
/*
* Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
*/
mips_scroll_message();
}
-irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
+static irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
{
return perf_irq();
}
.name = "performance",
};
-void __init plat_perf_setup(struct irqaction *irq)
+void __init plat_perf_setup(void)
{
+ struct irqaction *irq = &perf_irqaction;
+
cp0_perfcount_irq = -1;
#ifdef MSC01E_INT_BASE
mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}
- /* we are using the cpu counter for timer interrupts */
- irq->handler = mips_timer_interrupt; /* we use our own handler */
#ifdef CONFIG_MIPS_MT_SMTC
setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << cp0_compare_irq);
#else
set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
- plat_perf_setup(&perf_irqaction);
+ plat_perf_setup();
}
unsigned long cpu_khz;
-irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
-{
-#ifdef CONFIG_SMP
- int cpu = smp_processor_id();
-
- /*
- * CPU 0 handles the global timer interrupt job
- * resets count/compare registers to trigger next timer int.
- */
-#ifndef CONFIG_MIPS_MT_SMTC
- if (cpu == 0) {
- timer_interrupt(irq, dev_id);
- } else {
- /* Everyone else needs to reset the timer int here as
- ll_local_timer_interrupt doesn't */
- /*
- * FIXME: need to cope with counter underflow.
- * More support needs to be added to kernel/time for
- * counter/timer interrupts on multiple CPU's
- */
- write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
- }
-#else /* SMTC */
- /*
- * In SMTC system, one Count/Compare set exists per VPE.
- * Which TC within a VPE gets the interrupt is essentially
- * random - we only know that it shouldn't be one with
- * IXMT set. Whichever TC gets the interrupt needs to
- * send special interprocessor interrupts to the other
- * TCs to make sure that they schedule, etc.
- *
- * That code is specific to the SMTC kernel, not to
- * the simulation platform, so it's invoked from
- * the general MIPS timer_interrupt routine.
- *
- * We have a problem in that the interrupt vector code
- * had to turn off the timer IM bit to avoid redundant
- * entries, but we may never get to mips_cpu_irq_end
- * to turn it back on again if the scheduler gets
- * involved. So we clear the pending timer here,
- * and re-enable the mask...
- */
-
- int vpflags = dvpe();
- write_c0_compare (read_c0_count() - 1);
- clear_c0_cause(0x100 << cp0_compare_irq);
- set_c0_status(0x100 << cp0_compare_irq);
- irq_enable_hazard();
- evpe(vpflags);
-
- if (cpu_data[cpu].vpe_id == 0)
- timer_interrupt(irq, dev_id);
- else
- write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
- smtc_timer_broadcast(cpu_data[cpu].vpe_id);
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- /*
- * every CPU should do profiling and process accounting
- */
- local_timer_interrupt (irq, dev_id);
-
- return IRQ_HANDLED;
-#else
- return timer_interrupt (irq, dev_id);
-#endif
-}
-
-
-
/*
* Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
*/
}
/* we are using the cpu counter for timer interrupts */
- irq->handler = sim_timer_interrupt;
setup_irq(mips_cpu_timer_irq, irq);
#ifdef CONFIG_SMP
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/irq_cpu.h>
-
#include <asm/sgi/ioc.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
+#include <asm/time.h>
/* #define DEBUG_SGINT */
#define SGI_INTERRUPTS SGINT_LOCAL3
#endif
-extern void indy_r4k_timer_interrupt(void);
extern void indy_8254timer_irq(void);
/*
* First we check for r4k counter/timer IRQ.
*/
if (pending & CAUSEF_IP7)
- indy_r4k_timer_interrupt();
+ ll_timer_interrupt(SGI_TIMER_IRQ, NULL);
else if (pending & CAUSEF_IP2)
indy_local0_irqdispatch();
else if (pending & CAUSEF_IP3)
irq_exit();
}
-void indy_r4k_timer_interrupt(void)
-{
- int irq = SGI_TIMER_IRQ;
-
- irq_enter();
- kstat_this_cpu.irqs[irq]++;
- timer_interrupt(irq, NULL);
- irq_exit();
-}
-
void __init plat_timer_setup(struct irqaction *irq)
{
/* over-write the handler, we use our own way */
__raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS,
IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
- if (cpu == 0) {
- /*
- * CPU 0 handles the global timer interrupt job
- */
- ll_timer_interrupt(irq);
- }
- else {
- /*
- * other CPUs should just do profiling and process accounting
- */
- ll_local_timer_interrupt(irq);
- }
+ ll_timer_interrupt(irq, NULL);
}
static cycle_t bcm1480_hpt_read(void)
____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
- if (cpu == 0) {
- /*
- * CPU 0 handles the global timer interrupt job
- */
- ll_timer_interrupt(irq);
- }
- else {
- /*
- * other CPUs should just do profiling and process accounting
- */
- ll_local_timer_interrupt(irq);
- }
+ ll_timer_interrupt(irq, NULL);
}
/*
extern struct clocksource clocksource_mips;
/*
- * high-level timer interrupt routines.
+ * The low-level timer interrupt routine.
*/
-extern irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-/*
- * the corresponding low-level timer interrupt routine.
- */
-extern asmlinkage void ll_timer_interrupt(int irq);
+extern void ll_timer_interrupt(int irq, void *dev_id);
/*
* profiling and process accouting is done separately in local_timer_interrupt
*/
extern void local_timer_interrupt(int irq, void *dev_id);
-extern asmlinkage void ll_local_timer_interrupt(int irq);
/*
* board specific routines required by time_init().
*/
extern unsigned int mips_hpt_frequency;
+/*
+ * The performance counter IRQ on MIPS is a close relative of the timer IRQ
+ * so it lives here.
+ */
+extern int (*perf_irq)(void);
+
#endif /* _ASM_TIME_H */