timekeeper.mult = clock->mult;
}
+/* Timekeeper helper functions. */
+static inline s64 timekeeping_get_ns(void)
+{
+ cycle_t cycle_now, cycle_delta;
+ struct clocksource *clock;
+
+ /* read clocksource: */
+ clock = timekeeper.clock;
+ cycle_now = clock->read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* return the delta converted to nanoseconds using the ntp adjusted mult: */
+ return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
+ timekeeper.shift);
+}
+
+static inline s64 timekeeping_get_ns_raw(void)
+{
+ cycle_t cycle_now, cycle_delta;
+ struct clocksource *clock;
+
+ /* read clocksource: */
+ clock = timekeeper.clock;
+ cycle_now = clock->read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* return the delta converted to nanoseconds using the raw clocksource mult: */
+ return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
+}
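
For reference, a minimal sketch (not part of the patch) of how a caller is expected to combine these helpers with the xtime seqlock; it mirrors the getnstimeofday() path below and assumes timespec_add_ns() from <linux/time.h>:

void example_read_wall_time(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		/* retry if a writer updated xtime during the read */
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		nsecs = timekeeping_get_ns();
	} while (read_seqretry(&xtime_lock, seq));

	/* fold the nanosecond delta into the snapshotted timespec */
	timespec_add_ns(ts, nsecs);
}
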
+
/*
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
void getnstimeofday(struct timespec *ts)
{
- cycle_t cycle_now, cycle_delta;
- struct clocksource *clock;
unsigned long seq;
s64 nsecs;
seq = read_seqbegin(&xtime_lock);
*ts = xtime;
-
- /* read clocksource: */
- clock = timekeeper.clock;
- cycle_now = clock->read(clock);
-
- /* calculate the delta since the last update_wall_time: */
- cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-
- /* convert to nanoseconds: */
- nsecs = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
- timekeeper.shift);
+ nsecs = timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
ktime_t ktime_get(void)
{
- cycle_t cycle_now, cycle_delta;
- struct clocksource *clock;
unsigned int seq;
s64 secs, nsecs;
seq = read_seqbegin(&xtime_lock);
secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
-
- /* read clocksource: */
- clock = timekeeper.clock;
- cycle_now = clock->read(clock);
-
- /* calculate the delta since the last update_wall_time: */
- cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-
- /* convert to nanoseconds: */
- nsecs += clocksource_cyc2ns(cycle_delta, timekeeper.mult,
- timekeeper.shift);
+ nsecs += timekeeping_get_ns();
} while (read_seqretry(&xtime_lock, seq));
/*
*/
void ktime_get_ts(struct timespec *ts)
{
- cycle_t cycle_now, cycle_delta;
- struct clocksource *clock;
struct timespec tomono;
unsigned int seq;
s64 nsecs;
seq = read_seqbegin(&xtime_lock);
*ts = xtime;
tomono = wall_to_monotonic;
-
- /* read clocksource: */
- clock = timekeeper.clock;
- cycle_now = clock->read(clock);
-
- /* calculate the delta since the last update_wall_time: */
- cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-
- /* convert to nanoseconds: */
- nsecs = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
- timekeeper.shift);
+ nsecs = timekeeping_get_ns();
} while (read_seqretry(&xtime_lock, seq));
{
unsigned long seq;
s64 nsecs;
- cycle_t cycle_now, cycle_delta;
- struct clocksource *clock;
do {
seq = read_seqbegin(&xtime_lock);
-
- /* read clocksource: */
- clock = timekeeper.clock;
- cycle_now = clock->read(clock);
-
- /* calculate the delta since the last update_wall_time: */
- cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-
- /* convert to nanoseconds: */
- nsecs = clocksource_cyc2ns(cycle_delta, clock->mult,
- clock->shift);
-
+ nsecs = timekeeping_get_ns_raw();
*ts = raw_time;
} while (read_seqretry(&xtime_lock, seq));