/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* keep track of when we need to update the rtc */
time_t last_rtc_update;

#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
static unsigned long first_settimeofday = 1;
#endif

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
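
/*
 * Editor's worked example (illustration only, not from the original
 * source): an "xsec" is 1/2^20 of a second, so XSEC_PER_SEC = 2^20.
 * With xsec = XSEC_PER_SEC/2 (half a second) and max = 1000000,
 * SCALE_XSEC gives (2^19 * 1000000) / 2^20 = 500000, i.e. half a
 * second expressed in microseconds.  On ppc32 the same result comes
 * from mulhwu((xsec << 12), max), which keeps only the high 32 bits
 * of the 64-bit product and thus avoids a runtime divide.
 */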

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE	TICK_LENGTH_SHIFT
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

u64 tb_to_ns_scale;
unsigned tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern unsigned long wall_jiffies;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

u64 tb_last_jiffy __cacheline_aligned_in_smp;
unsigned long tb_last_stamp;

/*
 * Note that on ppc32 this only stores the bottom 32 bits of
 * the timebase value, but that's enough to tell when a jiffy
 * has passed.
 */
DEFINE_PER_CPU(unsigned long, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
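
/*
 * Editor's note (illustration only, not from the original source):
 * div128_by_32(n, 0, tb_ticks_per_sec, &res) computes
 * (n << 64) / tb_ticks_per_sec, so res.result_low is the 0.64
 * fixed-point fraction n / tb_ticks_per_sec.  For example, with
 * tb_ticks_per_sec = 512000000 and HZ = 250, __cputime_jiffies_factor
 * encodes the fraction 250/512000000; mulhdu(ticks, factor) then
 * yields ticks * 250 / 512000000, i.e. timebase ticks converted
 * to jiffies.
 */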

/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, delta;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	if (!in_interrupt()) {
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
	cputime_t utime;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);
}

static void account_process_time(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	account_process_vtime(current);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_mode(regs));
	scheduler_tick();
	run_posix_cpu_timers(current);
}

#ifdef CONFIG_PPC_SPLPAR
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;		/* thread is running */
	u64	tb0;			/* timebase at origin time */
	u64	purr0;			/* PURR at origin time */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
	u64	stolen;			/* stolen time so far */
	spinlock_t lock;
};

static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	p->tb0 = mftb();
	p->purr0 = mfspr(SPRN_PURR);
	p->tb = p->tb0;
	p->purr = 0;
	wmb();
	p->initialized = 1;
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

void calculate_steal_time(void)
{
	u64 tb, purr, t0;
	s64 stolen;
	struct cpu_purr_data *p0, *pme, *phim;
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	cpu = smp_processor_id();
	pme = &per_cpu(cpu_purr_data, cpu);
	if (!pme->initialized)
		return;		/* this can happen in early boot */
	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
	spin_lock(&p0->lock);
	tb = mftb();
	purr = mfspr(SPRN_PURR) - pme->purr0;
	if (!phim->initialized || !cpu_online(cpu ^ 1)) {
		stolen = (tb - pme->tb) - (purr - pme->purr);
	} else {
		t0 = pme->tb0;
		if (phim->tb0 < t0)
			t0 = phim->tb0;
		stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
	}
	if (stolen > 0) {
		account_steal_time(current, stolen);
		p0->stolen += stolen;
	}
	pme->tb = tb;
	pme->purr = purr;
	spin_unlock(&p0->lock);
}
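
/*
 * Editor's note (illustration only, not from the original source): the
 * PURR advances only for cycles actually dispatched to this hardware
 * thread, while the timebase counts wall-clock cycles.  So in the simple
 * case above, (tb - pme->tb) - (purr - pme->purr) is elapsed time minus
 * time this thread actually received, i.e. the cycles the hypervisor
 * gave to someone else.
 */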

/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	int cpu;
	u64 purr;
	struct cpu_purr_data *p0, *pme, *phim;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	cpu = smp_processor_id();
	pme = &per_cpu(cpu_purr_data, cpu);
	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
	spin_lock_irqsave(&p0->lock, flags);
	pme->tb = pme->tb0 = mftb();
	purr = mfspr(SPRN_PURR);
	if (!phim->initialized) {
		pme->purr = 0;
		pme->purr0 = purr;
	} else {
		/* set p->purr and p->purr0 for no change in p0->stolen */
		pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
		pme->purr0 = purr - pme->purr;
	}
	pme->initialized = 1;
	spin_unlock_irqrestore(&p0->lock, flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)	update_process_times(user_mode(regs))
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb();
	snapshot_purr();
}

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
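
/*
 * Editor's usage note (illustration only, not from the original source):
 * udelay() simply busy-waits on the timebase, e.g. udelay(1000) spins for
 * roughly one millisecond (tb_ticks_per_usec * 1000 timebase ticks).  It
 * relies on tb_ticks_per_usec having been set in time_init(); before
 * that, the "sane default" of 100 ticks per microsecond applies.
 */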

static __inline__ void timer_check_rtc(void)
{
	/*
	 * update the rtc when needed, this should be performed on the
	 * right fraction of a second. Half or full second ?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls. Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts. This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 * We should have an rtc call that only sets the minutes and
	 * seconds like on Intel to avoid problems with non UTC clocks.
	 */
	if (ppc_md.set_rtc_time && ntp_synced() &&
	    xtime.tv_sec - last_rtc_update >= 659 &&
	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
		struct rtc_time tm;
		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec + 1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
	}
}
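
/*
 * Editor's note (illustration only, not from the original source): since
 * last_rtc_update is set to xtime.tv_sec + 1 on success, the
 * "xtime.tv_sec - last_rtc_update >= 659" test above re-arms the RTC
 * write roughly every 660 seconds, i.e. the classic 11-minute RTC update
 * interval used while NTP is synchronized.
 */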

/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv)
{
	unsigned long sec, usec;
	u64 tb_ticks, xsec;
	struct gettimeofday_vars *temp_varp;
	u64 temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (gets rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply)
	 */
	temp_varp = do_gtod.varp;

	/* Sampling the time base must be done after loading
	 * do_gtod.varp in order to avoid racing with update_gtod.
	 */
	data_barrier(temp_varp);
	tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
	sec = xsec / XSEC_PER_SEC;
	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
	usec = SCALE_XSEC(usec, 1000000);

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

void do_gettimeofday(struct timeval *tv)
{
	if (__USE_RTC()) {
		/* do this the old way */
		unsigned long flags, seq;
		unsigned int sec, nsec, usec;

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		usec = nsec / 1000;
		while (usec >= 1000000) {
			usec -= 1000000;
			++sec;
		}
		tv->tv_sec = sec;
		tv->tv_usec = usec;
		return;
	}
	__do_gettimeofday(tv);
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criteria is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
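
/*
 * Editor's sketch of the reader side of the tb_update_count protocol
 * described above (illustrative pseudo-code only, not from the original
 * source; the real reader lives in the vdso):
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		rmb();
 *		... read tb_orig_stamp, stamp_xsec, tb_to_xs ...
 *		rmb();
 *	} while (seq != vdso_data->tb_update_count || (seq & 1));
 */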

/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32 bits number. This is a requirement of our fast 32 bits userland
 * implementation in the vdso. If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fallback to calling
 * the syscall.
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;
	u64 tlen, t2x;
	u64 tb, xsec_old, xsec_new;
	struct gettimeofday_vars *varp;

	if (__USE_RTC())
		return;
	tlen = current_tick_length();
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	if (tlen == last_tick_len && offset < 0x80000000u)
		return;
	if (tlen != last_tick_len) {
		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
		last_tick_len = tlen;
	} else
		t2x = do_gtod.varp->tb_to_xs;
	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Make sure time doesn't go backwards for userspace gettimeofday.
	 */
	tb = get_tb();
	varp = do_gtod.varp;
	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
		+ varp->stamp_xsec;
	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
	if (xsec_new < xsec_old)
		new_stamp_xsec += xsec_old - xsec_new;

	update_gtod(cur_tb, new_stamp_xsec, t2x);
}

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */

static void iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;
	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';
		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
						new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"                   new tb_ticks_per_jiffy = %lu\n"
					"                   old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;
}
#endif

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;
	u64 tb_next_jiffy;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	irq_enter();

	profile_tick(CPU_PROFILING, regs);
	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			account_process_time(regs);

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
			tb_last_jiffy = tb_next_jiffy;
			tb_last_stamp = per_cpu(last_jiffy, cpu);
			do_timer(regs);
			timer_recalc_offset(tb_last_jiffy);
			timer_check_rtc();
		}
		write_sequnlock(&xtime_lock);
	}

	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (hvlpevent_is_pending())
		process_hvlpevents(regs);
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
}

void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long half = tb_ticks_per_jiffy / 2;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;
	/*
	 * The stolen time calculation for POWER5 shared-processor LPAR
	 * systems works better if the two threads' timebase interrupts
	 * are staggered by half a jiffy with respect to each other.
	 */
	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		if (i == (boot_cpuid ^ 1))
			per_cpu(last_jiffy, i) =
				per_cpu(last_jiffy, boot_cpuid) - half;
		else if (i & 1)
			per_cpu(last_jiffy, i) =
				per_cpu(last_jiffy, i ^ 1) + half;
		else {
			previous_tb += offset;
			per_cpu(last_jiffy, i) = previous_tb;
		}
	}
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	u64 new_xsec;
	unsigned long tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
#ifdef CONFIG_PPC_ISERIES
	if (first_settimeofday) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Subtract off the number of nanoseconds since the
	 * beginning of the last tick.
	 * Note that since we don't increment jiffies_64 anywhere other
	 * than in do_timer (since we don't have a lost tick problem),
	 * wall_jiffies will always be the same as jiffies,
	 * and therefore the (jiffies - wall_jiffies) computation
	 * has been removed.
	 */
	tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = xtime.tv_nsec;
	if (new_xsec != 0) {
		new_xsec *= XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = (unsigned int *)get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = 0;
			while (cells--)
				*val = (*val << 32) | *fp++;
		}

		of_node_put(cpu);
	}

	return found;
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}

unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (ppc_md.time_init != NULL)
		timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_stamp = get_rtcl();
		tb_last_jiffy = tb_last_stamp;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_stamp = tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

	tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		tm -= timezone_offset;
	}

	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_stamp;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	time_freq = 0;

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}
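
/*
 * Editor's worked example for the sched_clock scale (illustration only,
 * not from the original source): with a hypothetical tb_ticks_per_sec of
 * 2^30 (~1.07 GHz), 1e9 * 2^64 / 2^30 = 1e9 * 2^34, whose high 64 bits
 * are already zero, so tb_to_ns_shift = 0 and tb_to_ns_scale = 1e9 * 2^34.
 * Then mulhdu(tb, tb_to_ns_scale) = tb * 1e9 / 2^30, i.e. timebase ticks
 * converted to nanoseconds, which is exactly what sched_clock() returns.
 */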

#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time *tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
	       tm->tm_mday;

	tm->tm_wday = day % 7;
}
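
/*
 * Editor's worked example (illustration only, not from the original
 * source): for 1 January 2001, lastYear = 2000,
 * leapsToDate = 500 - 20 + 5 = 485, and
 * day = 2000*365 + 485 + 0 + 1 = 730486; 730486 % 7 = 1, giving
 * tm_wday = 1 (0 = Sunday), i.e. a Monday, which is correct.
 */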

void to_tm(int tim, struct rtc_time *tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
1142/* Auxiliary function to compute scaling factors */
1143/* Actually the choice of a timebase running at 1/4 the of the bus
1144 * frequency giving resolution of a few tens of nanoseconds is quite nice.
1145 * It makes this computation very precise (27-28 bits typically) which
1146 * is optimistic considering the stability of most processor clock
1147 * oscillators and the precision with which the timebase frequency
1148 * is measured but does not harm.
1149 */
f2783c15
PM
1150unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
1151{
1da177e4
LT
1152 unsigned mlt=0, tmp, err;
1153 /* No concern for performance, it's done once: use a stupid
1154 * but safe and compact method to find the multiplier.
1155 */
1156
1157 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
f2783c15
PM
1158 if (mulhwu(inscale, mlt|tmp) < outscale)
1159 mlt |= tmp;
1da177e4
LT
1160 }
1161
1162 /* We might still be off by 1 for the best approximation.
1163 * A side effect of this is that if outscale is too large
1164 * the returned value will be zero.
1165 * Many corner cases have been checked and seem to work,
1166 * some might have been forgotten in the test however.
1167 */
1168
f2783c15
PM
1169 err = inscale * (mlt+1);
1170 if (err <= inscale/2)
1171 mlt++;
1da177e4 1172 return mlt;
f2783c15 1173}
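
/*
 * Editor's worked example (illustration only, not from the original
 * source): mulhwu_scale_factor() returns approximately
 * 2^32 * outscale / inscale.  For a hypothetical 64 MHz timebase,
 * mulhwu_scale_factor(64000000, 1000000) is about 2^32 / 64 = 67108864,
 * and mulhwu(ticks, 67108864) = (ticks * 2^26) >> 32 = ticks / 64,
 * converting timebase ticks to microseconds.
 */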

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
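
/*
 * Editor's worked example (illustration only, not from the original
 * source): div128_by_32(1, 0, 4, &dr) divides the 128-bit value 2^64
 * by 4.  The long division above proceeds one 32-bit digit at a time:
 * w = 0, x = 0, y = 2^30, z = 0, so dr->result_high = 0 and
 * dr->result_low = 2^62, as expected.
 */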