/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

#define XEN_SHIFT 22

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP      100000
#define NS_PER_TICK     (1000000000LL / HZ)

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen and blocked time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
static DEFINE_PER_CPU(u64, xen_residual_blocked);

/* return a consistent snapshot of a 64-bit time/counter value */
static u64 get64(const u64 *p)
{
        u64 ret;

        if (BITS_PER_LONG < 64) {
                u32 *p32 = (u32 *)p;
                u32 h, l;

                /*
                 * Read high then low, and then make sure high is
                 * still the same; this will only loop if low wraps
                 * and carries into high.
                 * XXX some clean way to make this endian-proof?
                 */
                do {
                        h = p32[1];
                        barrier();
                        l = p32[0];
                        barrier();
                } while (p32[1] != h);

                ret = (((u64)h) << 32) | l;
        } else
                ret = *p;

        return ret;
}
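
/*
 * Example of the race get64() guards against (illustrative values):
 * on a 32-bit kernel, if the counter ticks from 0x00000001ffffffff
 * to 0x0000000200000000 between the high and low reads, combining
 * the two halves would yield the never-valid 0x0000000100000000.
 * Re-reading the high word detects the carry and retries.
 */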

/*
 * Runstate accounting
 */
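/*
 * get_runstate_snapshot() is in effect a lock-free seqcount read:
 * Xen refreshes state_entry_time whenever it rewrites the runstate
 * area, so seeing the same value before and after the structure
 * copy means the snapshot is internally consistent.
 */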
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
        u64 state_time;
        struct vcpu_runstate_info *state;

        BUG_ON(preemptible());

        state = &__get_cpu_var(xen_runstate);

        /*
         * The runstate info is always updated by the hypervisor on
         * the current CPU, so there's no need to use anything
         * stronger than a compiler barrier when fetching it.
         */
        do {
                state_time = get64(&state->state_entry_time);
                barrier();
                *res = *state;
                barrier();
        } while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
        return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

void xen_setup_runstate_info(int cpu)
{
        struct vcpu_register_runstate_memory_area area;

        area.addr.v = &per_cpu(xen_runstate, cpu);

        if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
                               cpu, &area))
                BUG();
}

static void do_stolen_accounting(void)
{
        struct vcpu_runstate_info state;
        struct vcpu_runstate_info *snap;
        s64 blocked, runnable, offline, stolen;
        cputime_t ticks;

        get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        snap = &__get_cpu_var(xen_runstate_snapshot);

        /* work out how much time the VCPU has not been runn*ing* */
        blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
        runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
        offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

        *snap = state;

        /* Add the appropriate number of ticks of stolen time,
           including any left-overs from last time. */
        stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);

        if (stolen < 0)
                stolen = 0;

        ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
        __get_cpu_var(xen_residual_stolen) = stolen;
        account_steal_ticks(ticks);

        /* Add the appropriate number of ticks of blocked time,
           including any left-overs from last time. */
        blocked += __get_cpu_var(xen_residual_blocked);

        if (blocked < 0)
                blocked = 0;

        ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
        __get_cpu_var(xen_residual_blocked) = blocked;
        account_idle_ticks(ticks);
}
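
/*
 * Worked example for the residual handling above (assuming HZ=250,
 * so NS_PER_TICK = 4,000,000): if the runnable+offline deltas plus
 * the previous residual add up to 9,500,000 ns, iter_div_u64_rem()
 * yields 2 ticks and leaves 1,500,000 ns in xen_residual_stolen, to
 * be rolled into the next call so rounding never loses stolen time.
 */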

/*
 * Xen sched_clock implementation.  Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
unsigned long long xen_sched_clock(void)
{
        struct vcpu_runstate_info state;
        cycle_t now;
        u64 ret;
        s64 offset;

        /*
         * Ideally sched_clock should be called on a per-cpu basis
         * anyway, so preempt should already be disabled, but that's
         * not current practice at the moment.
         */
        preempt_disable();

        now = xen_clocksource_read();

        get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        offset = now - state.state_entry_time;
        if (offset < 0)
                offset = 0;

        ret = state.time[RUNSTATE_blocked] +
                state.time[RUNSTATE_running] +
                offset;

        preempt_enable();

        return ret;
}
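
/*
 * Example (hypothetical figures): a vcpu that has spent 5 ms running,
 * 3 ms blocked and 2 ms runnable (stolen) since boot sees
 * xen_clocksource_read() at ~10 ms but xen_sched_clock() at ~8 ms,
 * so stolen time is never charged to the tasks being scheduled.
 */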

/* Get the TSC speed from Xen */
unsigned long xen_tsc_khz(void)
{
        struct pvclock_vcpu_time_info *info =
                &HYPERVISOR_shared_info->vcpu_info[0].time;

        return pvclock_tsc_khz(info);
}

cycle_t xen_clocksource_read(void)
{
        struct pvclock_vcpu_time_info *src;
        cycle_t ret;

        src = &get_cpu_var(xen_vcpu)->time;
        ret = pvclock_clocksource_read(src);
        put_cpu_var(xen_vcpu);
        return ret;
}
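
/*
 * pvclock_clocksource_read() extrapolates the per-vcpu time record
 * that Xen keeps up to date: roughly system_time plus the elapsed
 * TSC delta scaled by the record's tsc_to_system_mul/tsc_shift
 * fields, retried under its version counter.  The result is
 * nanoseconds of Xen system time (ns since the domain booted).
 */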

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
        return xen_clocksource_read();
}

static void xen_read_wallclock(struct timespec *ts)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        struct pvclock_wall_clock *wall_clock = &(s->wc);
        struct pvclock_vcpu_time_info *vcpu_time;

        vcpu_time = &get_cpu_var(xen_vcpu)->time;
        pvclock_read_wallclock(wall_clock, vcpu_time, ts);
        put_cpu_var(xen_vcpu);
}

unsigned long xen_get_wallclock(void)
{
        struct timespec ts;

        xen_read_wallclock(&ts);
        return ts.tv_sec;
}

int xen_set_wallclock(unsigned long now)
{
        /* do nothing for domU */
        return -1;
}

static struct clocksource xen_clocksource __read_mostly = {
        .name = "xen",
        .rating = 400,
        .read = xen_clocksource_get_cycles,
        .mask = ~0,
        .mult = 1<<XEN_SHIFT,           /* time directly in nanoseconds */
        .shift = XEN_SHIFT,
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
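
/*
 * Sanity check on the mult/shift choice above: the clocksource core
 * computes ns = (cycles * mult) >> shift, and with mult = 1<<XEN_SHIFT
 * and shift = XEN_SHIFT that is the identity; e.g. a raw reading of
 * 123456789 is reported as exactly 123456789 ns.
 */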

/*
        Xen clockevent implementation

        Xen has two clockevent implementations:

        The old timer_op one works with all released versions of Xen prior
        to version 3.0.4.  This version of the hypervisor provides a
        single-shot timer with nanosecond resolution.  However, sharing the
        same event channel is a 100Hz tick which is delivered while the
        vcpu is running.  We don't care about or use this tick, but it will
        cause the core time code to think the timer fired too soon, and
        will end up resetting it each time.  It could be filtered, but
        doing so has complications when the ktime clocksource is not yet
        the xen clocksource (ie, at boot time).

        The new vcpu_op-based timer interface allows the tick timer period
        to be changed or turned off.  The tick timer is not useful as a
        periodic timer because events are only delivered to running vcpus.
        The one-shot timer can report when a timeout is in the past, so
        set_next_event is capable of returning -ETIME when appropriate.
        This interface is used when available.
*/


/*
        Get a hypervisor absolute time.  In theory we could maintain an
        offset between the kernel's time and the hypervisor's time, and
        apply that to a kernel's absolute timeout.  Unfortunately the
        hypervisor and kernel times can drift even if the kernel is using
        the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
        return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                /* unsupported */
                WARN_ON(1);
                break;

        case CLOCK_EVT_MODE_ONESHOT:
        case CLOCK_EVT_MODE_RESUME:
                break;

        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                HYPERVISOR_set_timer_op(0);  /* cancel timeout */
                break;
        }
}

static int xen_timerop_set_next_event(unsigned long delta,
                                      struct clock_event_device *evt)
{
        WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

        if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
                BUG();

        /* We may have missed the deadline, but there's no real way of
           knowing for sure.  If the event was in the past, then we'll
           get an immediate interrupt. */

        return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
        .name = "xen",
        .features = CLOCK_EVT_FEAT_ONESHOT,

        .max_delta_ns = 0xffffffff,
        .min_delta_ns = TIMER_SLOP,

        .mult = 1,
        .shift = 0,
        .rating = 500,

        .set_mode = xen_timerop_set_mode,
        .set_next_event = xen_timerop_set_next_event,
};


static void xen_vcpuop_set_mode(enum clock_event_mode mode,
                                struct clock_event_device *evt)
{
        int cpu = smp_processor_id();

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                WARN_ON(1);     /* unsupported */
                break;

        case CLOCK_EVT_MODE_ONESHOT:
                if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
                        BUG();
                break;

        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
                    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
                        BUG();
                break;
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

static int xen_vcpuop_set_next_event(unsigned long delta,
                                     struct clock_event_device *evt)
{
        int cpu = smp_processor_id();
        struct vcpu_set_singleshot_timer single;
        int ret;

        WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

        single.timeout_abs_ns = get_abs_timeout(delta);
        single.flags = VCPU_SSHOTTMR_future;

        ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

        BUG_ON(ret != 0 && ret != -ETIME);

        return ret;
}
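
/*
 * VCPU_SSHOTTMR_future makes Xen fail the hypercall with -ETIME
 * rather than fire immediately if timeout_abs_ns is already in the
 * past; the clockevents core responds to -ETIME by reprogramming
 * with a larger delta, which is why it is safe to return it here.
 */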

static const struct clock_event_device xen_vcpuop_clockevent = {
        .name = "xen",
        .features = CLOCK_EVT_FEAT_ONESHOT,

        .max_delta_ns = 0xffffffff,
        .min_delta_ns = TIMER_SLOP,

        .mult = 1,
        .shift = 0,
        .rating = 500,

        .set_mode = xen_vcpuop_set_mode,
        .set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
        &xen_timerop_clockevent;
static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
        irqreturn_t ret;

        ret = IRQ_NONE;
        if (evt->event_handler) {
                evt->event_handler(evt);
                ret = IRQ_HANDLED;
        }

        do_stolen_accounting();

        return ret;
}

void xen_setup_timer(int cpu)
{
        const char *name;
        struct clock_event_device *evt;
        int irq;

        printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

        name = kasprintf(GFP_KERNEL, "timer%d", cpu);
        if (!name)
                name = "<timer kasprintf failed>";

        irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
                                      name, NULL);

        evt = &per_cpu(xen_clock_events, cpu);
        memcpy(evt, xen_clockevent, sizeof(*evt));

        evt->cpumask = cpumask_of(cpu);
        evt->irq = irq;
}
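
/*
 * Each vcpu has its own timer, so VIRQ_TIMER is bound once per cpu;
 * IRQF_PERCPU and IRQF_NOBALANCING keep the resulting irq pinned to
 * its cpu instead of letting irq balancing migrate it.
 */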

void xen_teardown_timer(int cpu)
{
        struct clock_event_device *evt;
        BUG_ON(cpu == 0);
        evt = &per_cpu(xen_clock_events, cpu);
        unbind_from_irqhandler(evt->irq, NULL);
}

void xen_setup_cpu_clockevents(void)
{
        BUG_ON(preemptible());

        clockevents_register_device(&__get_cpu_var(xen_clock_events));
}
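
/*
 * Across a save/restore the hypervisor brings each vcpu back up with
 * its default periodic tick enabled, so when the vcpuop clockevent is
 * in use the periodic timers have to be switched off again on resume.
 */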
void xen_timer_resume(void)
{
        int cpu;

        if (xen_clockevent != &xen_vcpuop_clockevent)
                return;

        for_each_online_cpu(cpu) {
                if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
                        BUG();
        }
}

__init void xen_time_init(void)
{
        int cpu = smp_processor_id();

        clocksource_register(&xen_clocksource);

        if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
                /* Successfully turned off 100Hz tick, so we have the
                   vcpuop-based timer interface */
                printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
                xen_clockevent = &xen_vcpuop_clockevent;
        }

        /* Set initial system time with full resolution */
        xen_read_wallclock(&xtime);
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);

        setup_force_cpu_cap(X86_FEATURE_TSC);

        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_setup_cpu_clockevents();
}