/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
- unsigned long old, unsigned long new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+ case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
{
switch (size) {
#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+ case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
+#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+
#endif /* _ASM_PARISC_CMPXCHG_H_ */
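/*
 * Illustrative sketch only (not part of this patch): with the unconditional
 * cmpxchg64() define above, callers can build 64-bit atomic update loops even
 * on a 32-bit kernel.  The helper name and counter variable are hypothetical.
 */
static inline u64 example_counter_add(volatile u64 *counter, u64 delta)
{
	u64 old, new;

	do {
		old = *counter;		/* sample current value */
		new = old + delta;	/* compute the desired update */
	} while (cmpxchg64(counter, old, new) != old);	/* retry on races */

	return new;
}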
static unsigned long clocktick __read_mostly; /* timer cycles per tick */
+#ifndef CONFIG_64BIT
+/*
+ * The processor-internal cycle counter (Control Register 16) is used as time
+ * source for the sched_clock() function.  This register is 64 bits wide on a
+ * 64-bit kernel and 32 bits wide on a 32-bit kernel.  Since sched_clock()
+ * always requires a 64-bit counter, on a 32-bit kernel we emulate the upper
+ * 32 bits with a per-cpu variable which we increment every time the counter
+ * wraps around (which happens roughly every ~4 seconds).
+ */
+static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
+#endif
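/*
 * Back-of-the-envelope illustration (assumed clock rate, not part of the
 * patch): CR16 counts CPU cycles, so its low 32 bits wrap after 2^32 cycles.
 * At a clock around 1 GHz that is 4294967296 / 1e9 ~= 4.3 s, which is where
 * the "~4 seconds" above comes from; slower CPUs wrap correspondingly less
 * often.
 */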
+
/*
* We keep time on PA-RISC Linux by using the Interval Timer which is
* a pair of registers; one is read-only and one is write-only; both
*/
mtctl(next_tick, 16);
+#if !defined(CONFIG_64BIT)
+	/* check for overflow on a 32-bit kernel (every ~4 seconds). */
+ if (unlikely(next_tick < now))
+ this_cpu_inc(cr16_high_32_bits);
+#endif
+
/* Skip one clocktick on purpose if we missed next_tick.
* The new CR16 must be "later" than current CR16 otherwise
 * itimer would not fire until CR16 wrapped - e.g. 4 seconds
unsigned int cpu = smp_processor_id();
unsigned long next_tick = mfctl(16) + clocktick;
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+	/* With multiple 64-bit CPUs online, the CR16s are not synchronized. */
+ if (cpu != 0)
+ clear_sched_clock_stable();
+#endif
+
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
per_cpu(cpu_data, cpu).it_value = next_tick;
}
}
+
+/*
+ * sched_clock() framework
+ */
+
+static u32 cyc2ns_mul __read_mostly;
+static u32 cyc2ns_shift __read_mostly;
+
+u64 sched_clock(void)
+{
+ u64 now;
+
+ /* Get current cycle counter (Control Register 16). */
+#ifdef CONFIG_64BIT
+ now = mfctl(16);
+#else
+ now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
+#endif
+
+ /* return the value in ns (cycles_2_ns) */
+ return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+}
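/*
 * Worked example (assumed frequency, not from the patch): with an 800 MHz
 * CR16, current_cr16_khz is 800000, so one cycle lasts
 * NSEC_PER_MSEC / 800000 = 1.25 ns.  clocks_calc_mult_shift() below picks
 * cyc2ns_mul and cyc2ns_shift such that cyc2ns_mul / 2^cyc2ns_shift is
 * approximately 1.25, and mul_u64_u32_shr() computes
 * (now * cyc2ns_mul) >> cyc2ns_shift without losing the high bits of the
 * 64-bit product.
 */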
+
+
+/*
+ * timer interrupt and sched_clock() initialization
+ */
+
void __init time_init(void)
{
unsigned long current_cr16_khz;
+ current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
clocktick = (100 * PAGE0->mem_10msec) / HZ;
+ /* calculate mult/shift values for cr16 */
+ clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
+ NSEC_PER_MSEC, 0);
+
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+	/* At bootup only one 64-bit CPU is online and its CR16 is "stable" */
+ set_sched_clock_stable();
+#endif
+
start_cpu_itimer(); /* get CPU 0 started */
/* register at clocksource framework */
- current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
}
}
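/*
 * Numerical illustration (assumed values, not computed by the patch): for the
 * 800 MHz example above, one consistent (cyc2ns_mul, cyc2ns_shift) pair would
 * be shift = 26 and mul = 1.25 * 2^26 = 83886080, since 83886080 / 2^26 gives
 * 1.25 ns per cycle.  clocks_calc_mult_shift() chooses the actual pair to
 * maximize precision while avoiding multiplication overflow.
 */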
-#ifdef CONFIG_64BIT
-unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
{
unsigned long flags;
- unsigned long prev;
+ u64 prev;
_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
return prev;
}
-#endif
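/*
 * Background note (not part of the patch): PA-RISC has no native
 * compare-and-swap instruction (its only atomic memory primitive is ldcw),
 * so __cmpxchg_u32() and __cmpxchg_u64() emulate cmpxchg under an
 * interrupt-safe spinlock via _atomic_spin_lock_irqsave().  Dropping the
 * CONFIG_64BIT guard above makes the 64-bit variant available on 32-bit
 * kernels as well, matching the unconditional cmpxchg64() define in cmpxchg.h.
 */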
unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{