{
enable_timer_nmi_watchdog();
unset_nmi_callback();
- synchronize_kernel();
+ synchronize_sched(); /* Allow already-started NMIs to complete. */
}
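
/*
 * Illustrative sketch only; the example_* names below are made up and are
 * not part of this patch.  The two replacements for synchronize_kernel()
 * wait for different kinds of readers: synchronize_rcu() waits for
 * pre-existing rcu_read_lock()/rcu_read_unlock() sections, while
 * synchronize_sched() waits for pre-existing preemption-disabled regions,
 * which covers hardirq and NMI handlers, hence its use after
 * unset_nmi_callback() above.
 */
#include <linux/rcupdate.h>
#include <linux/preempt.h>

struct example_state {
        int hits;
};

static struct example_state *example_ptr;

/* The kind of reader synchronize_rcu() waits for. */
static void example_rcu_reader(void)
{
        struct example_state *p;

        rcu_read_lock();
        p = rcu_dereference(example_ptr);
        if (p)
                p->hits++;
        rcu_read_unlock();
}

/* The kind of reader synchronize_sched() waits for; hardirq and NMI
 * handlers behave like this region as far as the grace period is
 * concerned. */
static void example_sched_reader(void)
{
        preempt_disable();
        if (example_ptr)
                example_ptr->hits++;
        preempt_enable();
}
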
/* We now sleep until all other CPUs have scheduled. This ensures that
* the deletion is seen by all other CPUs, and that the deleted handler
* isn't still running on another CPU when we return. */
- synchronize_kernel();
+ synchronize_rcu();
}
}
return rc;
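
/*
 * Illustrative sketch of the pattern the comment above describes; the
 * example_* names are made up.  Unpublish the pointer, wait one grace
 * period, then free: once synchronize_rcu() returns, every CPU has seen
 * the NULL pointer and no CPU can still be running the old handler from
 * inside an RCU read-side critical section.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_handler {
        void (*func)(int event);
};

static struct example_handler *example_handler_ptr;

static void example_unregister_handler(void)
{
        struct example_handler *old = example_handler_ptr;

        rcu_assign_pointer(example_handler_ptr, NULL);  /* deletion visible to all CPUs */
        synchronize_rcu();                              /* pre-existing readers finish  */
        kfree(old);                                     /* now safe to free             */
}
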
/* Fall back to the default idle loop */
pm_idle = pm_idle_save;
- synchronize_kernel();
+ synchronize_sched(); /* Relies on interrupts forcing exit from idle. */
pr->flags.power = 0;
result = acpi_processor_get_power_info(pr);
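
/*
 * Illustrative sketch of the save/restore above; the example_* names are
 * made up, while pm_idle and pm_idle_save come from the code itself.  A
 * CPU parked in the old idle routine only re-reads pm_idle after an
 * interrupt bounces it back through the idle loop, which is also what
 * lets it pass a quiescent state, so when synchronize_sched() returns no
 * CPU is still executing the routine being retired.
 */
static void (*example_idle_save)(void);

static void example_install_idle(void (*idle)(void))
{
        example_idle_save = pm_idle;
        pm_idle = idle;
}

static void example_remove_idle(void)
{
        pm_idle = example_idle_save;
        synchronize_sched();    /* relies on interrupts forcing exit from idle */
}
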
/* Wait until we know that we are out of any interrupt
handlers that might have been running before we freed the
interrupt. */
- synchronize_kernel();
+ synchronize_sched();
if (new_smi->si_sm) {
if (new_smi->handlers)
/* Wait until we know that we are out of any interrupt
handlers that might have been running before we freed the
interrupt. */
- synchronize_kernel();
+ synchronize_sched();
/* Wait for the timer to stop. This avoids problems with race
conditions when removing the timer here. */
atkbd_disable(atkbd);
/* make sure we don't have a command in flight */
- synchronize_kernel();
+ synchronize_sched(); /* Allow atkbd_interrupt()s to complete. */
flush_scheduled_work();
device_remove_file(&serio->dev, &atkbd_attr_extra);
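
/*
 * Illustrative ordering sketch; the example_* names are made up.  The
 * atkbd teardown above quiesces in dependency order: disabling the device
 * stops new interrupts, synchronize_sched() lets handlers that were
 * already running finish (and they may still queue work), and only then
 * does flush_scheduled_work() drain whatever those handlers queued.
 */
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct example_dev;
extern void example_stop_device(struct example_dev *dev);
extern void example_release(struct example_dev *dev);

static void example_shutdown(struct example_dev *dev)
{
        example_stop_device(dev);       /* no new interrupts or commands     */
        synchronize_sched();            /* in-flight handlers have returned  */
        flush_scheduled_work();         /* ...and their queued work has run  */
        example_release(dev);
}
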
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
goto abort;
}
p->rdev = NULL;
- synchronize_kernel();
+ synchronize_rcu();
if (atomic_read(&rdev->nr_pending)) {
/* lost the race, try later */
err = -EBUSY;
}
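
/*
 * Illustrative sketch of the md pattern repeated above; the example_*
 * names are made up, while nr_pending is the field the code itself uses.
 * Readers take a reference under rcu_read_lock() after loading the
 * pointer.  The remover unpublishes the pointer and waits a grace period,
 * so no reader can still sit between "load pointer" and "take reference";
 * after that the reference count is authoritative, and a nonzero value
 * means a reader won the race and removal must be retried later.
 */
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <asm/atomic.h>

struct example_dev {
        atomic_t nr_pending;
};

struct example_slot {
        struct example_dev *dev;
};

static struct example_dev *example_get(struct example_slot *slot)
{
        struct example_dev *dev;

        rcu_read_lock();
        dev = rcu_dereference(slot->dev);
        if (dev)
                atomic_inc(&dev->nr_pending);   /* reference taken under RCU */
        rcu_read_unlock();
        return dev;
}

static int example_remove(struct example_slot *slot)
{
        struct example_dev *dev = slot->dev;

        slot->dev = NULL;               /* unpublish                          */
        synchronize_rcu();              /* readers of the old value are done  */
        if (atomic_read(&dev->nr_pending)) {
                slot->dev = dev;        /* lost the race, try later           */
                return -EBUSY;
        }
        /* No references remain; the device can be torn down safely. */
        return 0;
}
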
/* Give a racing hard_start_xmit a few cycles to complete. */
- synchronize_kernel();
+ synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
/*
* And now for the $50k question: are IRQs disabled or not?
else
ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
if (!ret)
- synchronize_kernel();
+ synchronize_sched(); /* Allow interrupts to complete. */
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (4, dbf_txt);
ret = -EINVAL;
else {
adapter_handler = NULL;
- synchronize_kernel();
+ synchronize_sched(); /* Allow interrupts to complete. */
ret = 0;
}
sprintf (dbf_txt, "ret:%d", ret);
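
/*
 * Illustrative sketch of the adapter_handler pattern above; the example_*
 * names are made up.  The handler slot is claimed atomically with
 * cmpxchg() so only one owner can register, and the handler runs in
 * interrupt context.  On unregister, clearing the pointer only stops new
 * invocations; an interrupt that already loaded the old value may still
 * be running, so synchronize_sched() waits until every such handler has
 * returned before the caller tears down anything the handler uses.  The
 * register_timer_hook()/timer_hook hunk further down follows the same
 * unpublish-then-synchronize_sched() shape.
 */
#include <linux/rcupdate.h>
#include <linux/errno.h>

typedef void (example_handler_t)(int irq);

static example_handler_t *example_handler;

static int example_register(example_handler_t *fn)
{
        if (cmpxchg(&example_handler, NULL, fn) != NULL)
                return -EBUSY;          /* someone else already owns the slot */
        return 0;
}

static void example_unregister(void)
{
        example_handler = NULL;         /* stop new invocations               */
        synchronize_sched();            /* wait for handlers already running  */
}

/* Interrupt-side caller; runs with interrupts disabled. */
static void example_irq_entry(int irq)
{
        example_handler_t *fn = example_handler;

        if (fn)
                fn(irq);
}
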
/* Init routine failed: abort. Try to protect us from
buggy refcounters. */
mod->state = MODULE_STATE_GOING;
- synchronize_kernel();
+ synchronize_sched();
if (mod->unsafe)
printk(KERN_ERR "%s: module is now stuck!\n",
mod->name);
WARN_ON(hook != timer_hook);
timer_hook = NULL;
/* make sure all CPUs see the NULL hook */
- synchronize_kernel();
+ synchronize_sched(); /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(register_timer_hook);
}
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
- synchronize_kernel();
+ synchronize_rcu();
/* no cpu_online check required here since we clear the percpu
* array on cpu offline and set this to NULL.
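
/*
 * Illustrative sketch, with made-up example_* names, of why the cache
 * destroy path above must wait a grace period for SLAB_DESTROY_BY_RCU
 * caches.  The flag defers freeing of the underlying slab pages by a
 * grace period, so readers may still dereference an object under
 * rcu_read_lock() even after kmem_cache_free(), provided they revalidate
 * it; the cache and its pages therefore cannot be dismantled until those
 * readers are done.
 */
#include <linux/rcupdate.h>
#include <asm/atomic.h>

struct example_obj {
        int key;
        atomic_t refcnt;
};

/* The cache holding example_obj is assumed to have been created with the
 * SLAB_DESTROY_BY_RCU flag, so the object stays valid memory of this type
 * for at least a grace period after being freed. */
static int example_lookup_get(struct example_obj *obj, int key)
{
        int ok = 0;

        rcu_read_lock();
        if (atomic_inc_not_zero(&obj->refcnt)) {
                if (obj->key == key)
                        ok = 1;                    /* still the object we wanted */
                else
                        atomic_dec(&obj->refcnt);  /* recycled; drop the ref     */
        }
        rcu_read_unlock();
        return ok;
}
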
void synchronize_net(void)
{
might_sleep();
- synchronize_kernel();
+ synchronize_rcu();
}
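
/*
 * Illustrative caller sketch; the example_* names are made up.
 * synchronize_net() is used the same way as synchronize_rcu() in the
 * hunks above (unpublish, wait, free), typically after removing something
 * that the packet-receive softirq path may still be walking.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct example_proto {
        struct list_head list;
};

static void example_proto_unregister(struct example_proto *p)
{
        list_del_rcu(&p->list);         /* new packets can no longer find it       */
        synchronize_net();              /* softirq readers already using it finish */
        kfree(p);
}
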
/**