}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
-static int __clockevents_set_state(struct clock_event_device *dev,
-				   enum clock_event_state state)
+static int __clockevents_switch_state(struct clock_event_device *dev,
+				      enum clock_event_state state)
{
/* Transition with legacy set_mode() callback */
if (dev->set_mode) {
}
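The body of __clockevents_switch_state() is elided in the hunk above. For orientation, here is a simplified sketch of the dispatch it performs, assuming the struct clock_event_device callbacks of this era from <linux/clockchips.h> (set_mode for legacy drivers, set_state_shutdown/set_state_periodic/set_state_oneshot for converted ones, relying on the numeric correspondence the core kept between the mode and state enums). This is an illustration only, not the verbatim upstream body, which also handles CLOCK_EVT_FEAT_DUMMY devices and the ONESHOT_STOPPED sanity checks:

/* Illustration only, not part of the patch */
static int __clockevents_switch_state_sketch(struct clock_event_device *dev,
					      enum clock_event_state state)
{
	/* Legacy drivers expose a single set_mode() callback for all transitions */
	if (dev->set_mode) {
		/* The legacy callback cannot express states beyond ONESHOT */
		if (state > CLOCK_EVT_STATE_ONESHOT)
			return -ENOSYS;
		dev->set_mode((enum clock_event_mode)state, dev);
		dev->mode = (enum clock_event_mode)state;
		return 0;
	}

	/* Converted drivers provide one callback per target state */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/* Device is being replaced; treat it as a shutdown */
	case CLOCK_EVT_STATE_SHUTDOWN:
		return dev->set_state_shutdown(dev);
	case CLOCK_EVT_STATE_PERIODIC:
		return dev->set_state_periodic(dev);
	case CLOCK_EVT_STATE_ONESHOT:
		return dev->set_state_oneshot(dev);
	default:
		return -ENOSYS;
	}
}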
/**
- * clockevents_set_state - set the operating state of a clock event device
+ * clockevents_switch_state - set the operating state of a clock event device
* @dev: device to modify
* @state: new state
*
* Must be called with interrupts disabled !
*/
-void clockevents_set_state(struct clock_event_device *dev,
-			   enum clock_event_state state)
+void clockevents_switch_state(struct clock_event_device *dev,
+			      enum clock_event_state state)
{
if (dev->state != state) {
- if (__clockevents_set_state(dev, state))
+ if (__clockevents_switch_state(dev, state))
return;
dev->state = state;
*/
void clockevents_shutdown(struct clock_event_device *dev)
{
- clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
dev->next_event.tv64 = KTIME_MAX;
}
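clockevents_shutdown() above is a thin wrapper that parks the device and clears its next_event. As the kernel-doc notes, clockevents_switch_state() must be called with interrupts disabled; a hypothetical core-internal caller that is not already in such a context would look roughly like this (example_park_tick_device is a made-up name for illustration, not part of the patch):

/* Illustration only, not part of the patch */
static void example_park_tick_device(struct clock_event_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
	local_irq_restore(flags);
}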
return clockevents_program_event(dev, dev->next_event, false);
if (clockevent_state_periodic(dev))
- return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
+ return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
return 0;
}
*/
if (old) {
module_put(old->owner);
- clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED);
+ clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
list_del(&old->list);
list_add(&old->list, &clockevents_released);
}
ktime_t expires)
{
if (!clockevent_state_oneshot(bc))
- clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
clockevents_program_event(bc, expires, 1);
tick_broadcast_set_affinity(bc, cpumask_of(cpu));
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
- clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}
/*
* switched over, leave the device alone.
*/
if (td->mode == TICKDEV_MODE_ONESHOT) {
- clockevents_set_state(td->evtdev,
+ clockevents_switch_state(td->evtdev,
CLOCK_EVT_STATE_ONESHOT);
}
}
if (dev->next_event.tv64 < bc->next_event.tv64)
return;
}
- clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
/**
cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
} else {
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
/*
* The cpu which was handling the broadcast
* timer marked this cpu in the broadcast
tick_broadcast_oneshot_mask, tmpmask);
if (was_periodic && !cpumask_empty(tmpmask)) {
- clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
tick_broadcast_init_next_event(tmpmask,
tick_next_period);
tick_broadcast_set_event(bc, cpu, tick_next_period);
if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
!tick_broadcast_oneshot_active()) {
- clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
} else {
unsigned long seq;
ktime_t next;
next = tick_next_period;
} while (read_seqretry(&jiffies_lock, seq));
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
for (;;) {
if (!clockevents_program_event(dev, next, false))
return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
}
+static inline enum clock_event_state clockevent_get_state(struct clock_event_device *dev)
+{
+	return dev->state;
+}
+
+static inline void clockevent_set_state(struct clock_event_device *dev,
+					enum clock_event_state state)
+{
+	dev->state = state;
+}
+
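The two accessors added above wrap reads and writes of dev->state. One way the core might use them, taking clockevents_switch_state() from the first hunk as an example and trimmed to the part shown there (a sketch assuming the helpers are visible at that point; the hunks above still touch dev->state directly):

/* Illustration only, not part of the patch */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	if (clockevent_get_state(dev) != state) {
		if (__clockevents_switch_state(dev, state))
			return;

		clockevent_set_state(dev, state);
	}
}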
extern void clockevents_shutdown(struct clock_event_device *dev);
extern void clockevents_exchange_device(struct clock_event_device *old,
					struct clock_event_device *new);
-extern void clockevents_set_state(struct clock_event_device *dev,
-				  enum clock_event_state state);
+extern void clockevents_switch_state(struct clock_event_device *dev,
+				     enum clock_event_state state);
extern int clockevents_program_event(struct clock_event_device *dev,
ktime_t expires, bool force);
extern void clockevents_handle_noop(struct clock_event_device *dev);
/*
* We don't need the clock event device any more, stop it.
*/
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
return 0;
}
* We need the clock event again, configure it in ONESHOT mode
* before using it.
*/
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
}
return clockevents_program_event(dev, expires, force);
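Taken together, the two fragments above show the ONESHOT_STOPPED round trip in the tick layer: the device is stopped when there is nothing left to program and switched back to ONESHOT before it is programmed again. A consolidated sketch follows; the function name is hypothetical and the two conditions (the KTIME_MAX test and the clockevent_state_oneshot_stopped() check) are my reading of the surrounding code, not quoted from the patch:

/* Illustration only, not part of the patch */
int example_tick_program_event(ktime_t expires, int force)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (expires.tv64 == KTIME_MAX) {
		/* We don't need the clock event device any more, stop it. */
		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
		return 0;
	}

	if (clockevent_state_oneshot_stopped(dev)) {
		/* We need the clock event again, switch back to ONESHOT first. */
		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
	}

	return clockevents_program_event(dev, expires, force);
}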
{
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
clockevents_program_event(dev, ktime_get(), true);
}
ktime_t next_event)
{
newdev->event_handler = handler;
- clockevents_set_state(newdev, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(newdev, CLOCK_EVT_STATE_ONESHOT);
clockevents_program_event(newdev, next_event, true);
}
td->mode = TICKDEV_MODE_ONESHOT;
dev->event_handler = handler;
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
tick_broadcast_switch_to_oneshot();
return 0;
}
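For reference, these are the states being switched between throughout the hunks above, i.e. enum clock_event_state from include/linux/clockchips.h as I recall it around this series (reproduced for convenience, not part of the patch):

/* For reference only, not part of the patch */
enum clock_event_state {
	CLOCK_EVT_STATE_DETACHED,
	CLOCK_EVT_STATE_SHUTDOWN,
	CLOCK_EVT_STATE_PERIODIC,
	CLOCK_EVT_STATE_ONESHOT,
	CLOCK_EVT_STATE_ONESHOT_STOPPED,
};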