Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
a23ba435 | 2 | * arch/sh/kernel/time_64.c |
1da177e4 LT |
3 | * |
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | |
6c7e2a55 | 5 | * Copyright (C) 2003 - 2007 Paul Mundt |
1da177e4 LT |
6 | * Copyright (C) 2003 Richard Curnow |
7 | * | |
8 | * Original TMU/RTC code taken from sh version. | |
9 | * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka | |
10 | * Some code taken from i386 version. | |
11 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | |
a23ba435 PM |
12 | * |
13 | * This file is subject to the terms and conditions of the GNU General Public | |
14 | * License. See the file "COPYING" in the main directory of this archive | |
15 | * for more details. | |
1da177e4 | 16 | */ |
1da177e4 LT |
17 | #include <linux/errno.h> |
18 | #include <linux/rwsem.h> | |
19 | #include <linux/sched.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/param.h> | |
22 | #include <linux/string.h> | |
23 | #include <linux/mm.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/time.h> | |
26 | #include <linux/delay.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/profile.h> | |
29 | #include <linux/smp.h> | |
4940fb44 | 30 | #include <linux/module.h> |
4f3a36a7 | 31 | #include <linux/bcd.h> |
6c7e2a55 PM |
32 | #include <linux/timex.h> |
33 | #include <linux/irq.h> | |
b4eaa1cc | 34 | #include <linux/io.h> |
6c7e2a55 | 35 | #include <linux/platform_device.h> |
f15cbe6f PM |
36 | #include <cpu/registers.h> /* required by inline __asm__ stmt. */ |
37 | #include <cpu/irq.h> | |
b4eaa1cc | 38 | #include <asm/addrspace.h> |
1da177e4 LT |
39 | #include <asm/processor.h> |
40 | #include <asm/uaccess.h> | |
1da177e4 | 41 | #include <asm/delay.h> |
4d01cdaf | 42 | #include <asm/clock.h> |
1da177e4 LT |
43 | |
/*
 * TMU setup values.  TMU0_TCR_INIT = 0x0020 presumably sets underflow
 * interrupt enable with the default prescaler — TODO confirm against the
 * SH-5 TMU hardware manual.  TSTR starts/stops channel 0.
 *
 * All address macros are fully parenthesized so they expand safely in
 * any expression context, not just as function arguments.
 */
#define TMU_TOCR_INIT	0x00
#define TMU0_TCR_INIT	0x0020
#define TMU_TSTR_INIT	1
#define TMU_TSTR_OFF	0

/* Real Time Clock */
#define RTC_BLOCK_OFF	0x01040000
#define RTC_BASE	(PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF)
#define RTC_RCR1_CIE	0x10	/* Carry Interrupt Enable */
#define RTC_RCR1	(rtc_base + 0x38)

/* Time Management Unit */
#define TMU_BLOCK_OFF	0x01020000
#define TMU_BASE	(PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF)
#define TMU0_BASE	(tmu_base + 0x8 + (0xc * 0x0))
#define TMU1_BASE	(tmu_base + 0x8 + (0xc * 0x1))
#define TMU2_BASE	(tmu_base + 0x8 + (0xc * 0x2))

#define TMU_TOCR	(tmu_base + 0x0)	/* Byte access */
#define TMU_TSTR	(tmu_base + 0x4)	/* Byte access */

#define TMU0_TCOR	(TMU0_BASE + 0x0)	/* Long access */
#define TMU0_TCNT	(TMU0_BASE + 0x4)	/* Long access */
#define TMU0_TCR	(TMU0_BASE + 0x8)	/* Word access */

#define TICK_SIZE (tick_nsec / 1000)
70 | ||
1da177e4 LT |
/* Remapped virtual bases for the TMU and RTC register blocks (time_init). */
static unsigned long tmu_base, rtc_base;
/* NOTE(review): non-static and never assigned in this file — presumably
 * set up and shared by other sh64 code; confirm before changing linkage. */
unsigned long cprc_base;

/* Variables to allow interpolation of time of day to resolution better than a
 * jiffy. */

/* This is effectively protected by xtime_lock */
static unsigned long ctc_last_interrupt;	/* CTC value latched at the last tick */
static unsigned long long usecs_per_jiffy = 1000000/HZ;	/* Approximation */

#define CTC_JIFFY_SCALE_SHIFT 40

/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
85 | ||
86 | /* Estimate number of microseconds that have elapsed since the last timer tick, | |
0a354775 | 87 | by scaling the delta that has occurred in the CTC register. |
1da177e4 LT |
88 | |
89 | WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at | |
90 | the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this | |
91 | in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm | |
92 | probably needs to use TMU.TCNT0 instead. This will work even if the CPU is | |
93 | sleeping, though will be coarser. | |
94 | ||
95 | FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime | |
96 | is running or if the freq or tick arguments of adjtimex are modified after | |
97 | we have calibrated the scaling factor? This will result in either a jump at | |
98 | the end of a tick period, or a wrap backwards at the start of the next one, | |
99 | if the application is reading the time of day often enough. I think we | |
100 | ought to do better than this. For this reason, usecs_per_jiffy is left | |
101 | separated out in the calculation below. This allows some future hook into | |
102 | the adjtime-related stuff in kernel/timer.c to remove this hazard. | |
103 | ||
104 | */ | |
105 | ||
106 | static unsigned long usecs_since_tick(void) | |
107 | { | |
108 | unsigned long long current_ctc; | |
109 | long ctc_ticks_since_interrupt; | |
110 | unsigned long long ull_ctc_ticks_since_interrupt; | |
111 | unsigned long result; | |
112 | ||
113 | unsigned long long mul1_out; | |
114 | unsigned long long mul1_out_high; | |
115 | unsigned long long mul2_out_low, mul2_out_high; | |
116 | ||
117 | /* Read CTC register */ | |
118 | asm ("getcon cr62, %0" : "=r" (current_ctc)); | |
119 | /* Note, the CTC counts down on each CPU clock, not up. | |
120 | Note(2), use long type to get correct wraparound arithmetic when | |
121 | the counter crosses zero. */ | |
122 | ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc; | |
123 | ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt; | |
124 | ||
125 | /* Inline assembly to do 32x32x32->64 multiplier */ | |
126 | asm volatile ("mulu.l %1, %2, %0" : | |
127 | "=r" (mul1_out) : | |
128 | "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy)); | |
129 | ||
130 | mul1_out_high = mul1_out >> 32; | |
131 | ||
132 | asm volatile ("mulu.l %1, %2, %0" : | |
133 | "=r" (mul2_out_low) : | |
134 | "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy)); | |
135 | ||
136 | #if 1 | |
137 | asm volatile ("mulu.l %1, %2, %0" : | |
138 | "=r" (mul2_out_high) : | |
139 | "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy)); | |
140 | #endif | |
141 | ||
142 | result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT); | |
143 | ||
144 | return result; | |
145 | } | |
146 | ||
/*
 * Fill *tv with the current wall-clock time, interpolating below jiffy
 * resolution using the CTC delta from usecs_since_tick().
 *
 * The seqlock loop re-reads xtime if a timer interrupt updated it while
 * we were sampling, so the (sec, usec) pair is always consistent.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = usecs_since_tick();
		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Carry whole seconds out of the microsecond count. */
	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
EXPORT_SYMBOL(do_gettimeofday);
1da177e4 LT |
169 | |
/*
 * Set the wall-clock time from *tv, adjusting wall_to_monotonic by the
 * opposite amount so the monotonic clock is unaffected.
 *
 * Returns 0 on success, -EINVAL if tv->tv_nsec is out of range.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= 1000 * usecs_since_tick();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* New absolute time invalidates any NTP frequency/phase state. */
	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
1da177e4 | 200 | |
6c7e2a55 PM |
201 | /* Dummy RTC ops */ |
202 | static void null_rtc_get_time(struct timespec *tv) | |
1da177e4 | 203 | { |
6c7e2a55 PM |
204 | tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0); |
205 | tv->tv_nsec = 0; | |
206 | } | |
1da177e4 | 207 | |
6c7e2a55 PM |
/* Fallback RTC write when no real RTC driver is hooked up: silently
 * discard the time and report success. */
static int null_rtc_set_time(const time_t secs)
{
	return 0;
}
212 | ||
6c7e2a55 PM |
/* RTC accessor hooks, defaulting to the no-op implementations above.
 * NOTE(review): presumably reassigned by platform/RTC setup code
 * elsewhere — not visible in this file. */
void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;

/* last time the RTC clock got updated */
static long last_rtc_update;
1da177e4 LT |
218 | |
219 | /* | |
220 | * timer_interrupt() needs to keep up the real-time clock, | |
221 | * as well as call the "do_timer()" routine every clocktick | |
222 | */ | |
a226d33a | 223 | static inline void do_timer_interrupt(void) |
1da177e4 LT |
224 | { |
225 | unsigned long long current_ctc; | |
960c65e8 PZ |
226 | |
227 | if (current->pid) | |
228 | profile_tick(CPU_PROFILING); | |
229 | ||
230 | /* | |
231 | * Here we are in the timer irq handler. We just have irqs locally | |
232 | * disabled but we don't know if the timer_bh is running on the other | |
233 | * CPU. We need to avoid to SMP race with it. NOTE: we don' t need | |
234 | * the irq version of write_lock because as just said we have irq | |
235 | * locally disabled. -arca | |
236 | */ | |
9141d30a | 237 | write_seqlock(&xtime_lock); |
1da177e4 LT |
238 | asm ("getcon cr62, %0" : "=r" (current_ctc)); |
239 | ctc_last_interrupt = (unsigned long) current_ctc; | |
240 | ||
3171a030 | 241 | do_timer(1); |
1da177e4 | 242 | |
1da177e4 LT |
243 | /* |
244 | * If we have an externally synchronized Linux clock, then update | |
245 | * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be | |
246 | * called as close as possible to 500 ms before the new second starts. | |
247 | */ | |
b149ee22 | 248 | if (ntp_synced() && |
1da177e4 LT |
249 | xtime.tv_sec > last_rtc_update + 660 && |
250 | (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && | |
251 | (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { | |
6c7e2a55 | 252 | if (rtc_sh_set_time(xtime.tv_sec) == 0) |
1da177e4 LT |
253 | last_rtc_update = xtime.tv_sec; |
254 | else | |
6c7e2a55 PM |
255 | /* do it again in 60 s */ |
256 | last_rtc_update = xtime.tv_sec - 600; | |
1da177e4 | 257 | } |
9141d30a | 258 | write_sequnlock(&xtime_lock); |
960c65e8 PZ |
259 | |
260 | #ifndef CONFIG_SMP | |
261 | update_process_times(user_mode(get_irq_regs())); | |
262 | #endif | |
1da177e4 LT |
263 | } |
264 | ||
265 | /* | |
266 | * This is the same as the above, except we _also_ save the current | |
267 | * Time Stamp Counter value at the time of the timer interrupt, so that | |
268 | * we later on can estimate the time of day more exactly. | |
269 | */ | |
a226d33a | 270 | static irqreturn_t timer_interrupt(int irq, void *dev_id) |
1da177e4 LT |
271 | { |
272 | unsigned long timer_status; | |
273 | ||
274 | /* Clear UNF bit */ | |
275 | timer_status = ctrl_inw(TMU0_TCR); | |
276 | timer_status &= ~0x100; | |
277 | ctrl_outw(timer_status, TMU0_TCR); | |
278 | ||
a226d33a | 279 | do_timer_interrupt(); |
1da177e4 LT |
280 | |
281 | return IRQ_HANDLED; | |
282 | } | |
283 | ||
948d12cb TG |
/* Timer tick IRQ action for TMU0; registered in time_init(). */
static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED,
	.mask = CPU_MASK_NONE,
	.name = "timer",
};
1da177e4 LT |
290 | |
291 | void __init time_init(void) | |
292 | { | |
1da177e4 | 293 | unsigned long interval; |
4d01cdaf | 294 | struct clk *clk; |
1da177e4 LT |
295 | |
296 | tmu_base = onchip_remap(TMU_BASE, 1024, "TMU"); | |
297 | if (!tmu_base) { | |
298 | panic("Unable to remap TMU\n"); | |
299 | } | |
300 | ||
301 | rtc_base = onchip_remap(RTC_BASE, 1024, "RTC"); | |
302 | if (!rtc_base) { | |
303 | panic("Unable to remap RTC\n"); | |
304 | } | |
305 | ||
4d01cdaf PM |
306 | clk = clk_get(NULL, "cpu_clk"); |
307 | scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / | |
308 | (unsigned long long)(clk_get_rate(clk) / HZ)); | |
1da177e4 | 309 | |
6c7e2a55 | 310 | rtc_sh_get_time(&xtime); |
1da177e4 LT |
311 | |
312 | setup_irq(TIMER_IRQ, &irq0); | |
1da177e4 | 313 | |
4d01cdaf PM |
314 | clk = clk_get(NULL, "module_clk"); |
315 | interval = (clk_get_rate(clk)/(HZ*4)); | |
1da177e4 LT |
316 | |
317 | printk("Interval = %ld\n", interval); | |
318 | ||
1da177e4 LT |
319 | /* Start TMU0 */ |
320 | ctrl_outb(TMU_TSTR_OFF, TMU_TSTR); | |
321 | ctrl_outb(TMU_TOCR_INIT, TMU_TOCR); | |
322 | ctrl_outw(TMU0_TCR_INIT, TMU0_TCR); | |
323 | ctrl_outl(interval, TMU0_TCOR); | |
324 | ctrl_outl(interval, TMU0_TCNT); | |
325 | ctrl_outb(TMU_TSTR_INIT, TMU_TSTR); | |
326 | } | |
327 | ||
6c7e2a55 PM |
/*
 * Platform resources handed to the "sh-rtc" driver.  The I/O range in
 * slot [0] is filled in by rtc_init() once rtc_base has been remapped.
 */
static struct resource rtc_resources[] = {
	[0] = {
		/* RTC base, filled in by rtc_init */
		.flags = IORESOURCE_IO,
	},
	[1] = {
		/* Period IRQ */
		.start = IRQ_PRI,
		.flags = IORESOURCE_IRQ,
	},
	[2] = {
		/* Carry IRQ */
		.start = IRQ_CUI,
		.flags = IORESOURCE_IRQ,
	},
	[3] = {
		/* Alarm IRQ */
		.start = IRQ_ATI,
		.flags = IORESOURCE_IRQ,
	},
};

/* The sh-rtc platform device itself; registered by rtc_init(). */
static struct platform_device rtc_device = {
	.name		= "sh-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtc_resources),
	.resource	= rtc_resources,
};
356 | ||
357 | static int __init rtc_init(void) | |
358 | { | |
359 | rtc_resources[0].start = rtc_base; | |
360 | rtc_resources[0].end = rtc_resources[0].start + 0x58 - 1; | |
361 | ||
362 | return platform_device_register(&rtc_device); | |
363 | } | |
364 | device_initcall(rtc_init); |