/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>
struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u64 epoch_cyc;
	/*
	 * Copy of epoch_cyc used to keep readers in sync with the writer
	 * of the shared epoch data (cd.epoch_ns/cd.epoch_cyc), following
	 * the scheme of the old arch/arm/kernel/sched_clock.c.
	 */
	u64 epoch_cyc_copy;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};
static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);
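/*
 * The "irqtime" boot parameter above: irqtime=1 forces IRQ time
 * accounting on, irqtime=0 forces it off, and the default of -1
 * enables it only when the registered clock is fast enough (at least
 * 1 MHz; see sched_clock_register() below).
 */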
static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};
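/*
 * Worked example for the default above: with the jiffy clock (shift == 0)
 * and HZ == 100, .mult is NSEC_PER_SEC / HZ == 10000000, so cyc_to_ns()
 * turns each elapsed jiffy into 10 ms of sched_clock() time.
 */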
static u64 __read_mostly sched_clock_mask;
static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}
static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
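/*
 * Fixed-point example (illustrative values, not taken from a particular
 * platform): for a 13 MHz counter, each cycle is 1e9 / 13e6 ~= 76.923 ns,
 * and one valid pair clocks_calc_mult_shift() could return is
 * mult == 2520615, shift == 15, since 76.923 * (1 << 15) ~= 2520615.
 * Then cyc_to_ns(13000000, 2520615, 15) == 999999847, i.e. one second
 * of cycles converts to within a few hundred ns of NSEC_PER_SEC.
 */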
unsigned long long notrace sched_clock(void)
{
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Use cd.epoch_cyc_copy instead of the read/write seqcount: the
	 * seqcount cannot order this read path against the writer here,
	 * so a racing reader could see a mismatched epoch_cyc/epoch_ns
	 * pair.  Rereading until epoch_cyc matches epoch_cyc_copy closes
	 * that window (same scheme as the old arch/arm/kernel/sched_clock.c).
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}
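/*
 * Why the retry loop works (reader on CPU0, writer on CPU1): the writer
 * updates epoch_cyc_copy first and epoch_cyc last.  If CPU0 reads
 * epoch_cyc before CPU1's update but epoch_ns after it, the stale
 * epoch_cyc no longer matches the freshly written epoch_cyc_copy, so
 * CPU0 retries and picks up a consistent pair on the next pass.
 */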
/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	/*
	 * Mirror image of the read side in sched_clock(): write
	 * epoch_cyc_copy first and epoch_cyc last, with barriers in
	 * between, instead of using the write seqcount.
	 */
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}
void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	ktime_t new_wrap_kt;
	unsigned long r;
	char r_unit;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 0);

	/* CLOCKSOURCE_MASK() avoids the undefined 1ULL << 64 when bits == 64 */
	new_mask = CLOCKSOURCE_MASK(bits);

	/*
	 * Calculate how many ns until we wrap.  This replaces the mainline
	 * clocks_calc_max_nsecs() helper and assumes the mask * mult
	 * product fits in 64 bits.
	 */
	wrap = cyc_to_ns(new_mask, new_mult, new_shift);
	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

	/* update epoch for new counter and update epoch_ns from old counter */
	new_epoch = read();
	cyc = read_sched_clock();
	ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
				     cd.mult, cd.shift);

	write_seqcount_begin(&cd.seq);
	read_sched_clock = read;
	sched_clock_mask = new_mask;
	cd.rate = rate;
	cd.wrap_kt = new_wrap_kt;
	cd.mult = new_mult;
	cd.shift = new_shift;
	cd.epoch_cyc = new_epoch;
	/* keep the copy in sync so sched_clock() readers don't spin */
	cd.epoch_cyc_copy = new_epoch;
	cd.epoch_ns = ns;
	write_seqcount_end(&cd.seq);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
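/*
 * Registration example (hypothetical driver code, not part of this file):
 * a platform with a free-running 32-bit counter at 13 MHz would provide a
 * notrace read hook and register it early in its time init path:
 *
 *	static u64 notrace my_counter_read(void)
 *	{
 *		return readl_relaxed(my_counter_base);
 *	}
 *
 *	sched_clock_register(my_counter_read, 32, 13000000);
 *
 * my_counter_read/my_counter_base are illustrative names only.
 */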
void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make the jiffy-based one the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}
static int sched_clock_suspend(void)
{
	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}
static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	cd.suspended = false;
}
static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};
static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);