* Make sure the above info reaches the primary CPU before
* our cpu_in_kgdb[] flag setting does:
*/
- smp_wmb();
- atomic_set(&cpu_in_kgdb[cpu], 1);
+ atomic_inc(&cpu_in_kgdb[cpu]);
	/*
	 * CPU will loop if it is a slave or request to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
if (arch_kgdb_ops.correct_hw_break)
arch_kgdb_ops.correct_hw_break();
- atomic_set(&cpu_in_kgdb[cpu], 0);
+ atomic_dec(&cpu_in_kgdb[cpu]);
touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
local_irq_restore(flags);
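
One general property of the inc/dec style used in the hunks above, offered as background rather than as a statement of this patch's exact rationale: a counter tolerates nested or overlapping enter/leave pairs, while a flag written with set(1)/set(0) is cleared by whichever path leaves first. A minimal stand-alone sketch of that difference, using C11 atomics; the names enter, leave, flag and counter are hypothetical, not kernel symbols:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int flag;     /* models the old set(1)/set(0) style */
static atomic_int counter;  /* models the new inc/dec style       */

static void enter(void) { atomic_store(&flag, 1); atomic_fetch_add(&counter, 1); }
static void leave(void) { atomic_store(&flag, 0); atomic_fetch_sub(&counter, 1); }

int main(void)
{
	enter();	/* outer user of the "busy" state      */
	enter();	/* nested/overlapping user             */
	leave();	/* the inner user leaves first ...     */

	/* ... the flag already reads 0, but the counter still says busy */
	printf("flag=%d counter=%d\n",
	       atomic_load(&flag), atomic_load(&counter));	/* flag=0 counter=1 */

	leave();
	printf("flag=%d counter=%d\n",
	       atomic_load(&flag), atomic_load(&counter));	/* flag=0 counter=0 */
	return 0;
}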
	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active:
	 */
if (!kgdb_single_step) {
for (i = 0; i < NR_CPUS; i++)
- atomic_set(&passive_cpu_wait[i], 1);
+ atomic_inc(&passive_cpu_wait[i]);
}
#ifdef CONFIG_SMP
if (kgdb_io_ops->post_exception)
kgdb_io_ops->post_exception();
- atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+ atomic_dec(&cpu_in_kgdb[ks->cpu]);
if (!kgdb_single_step) {
for (i = NR_CPUS-1; i >= 0; i--)
- atomic_set(&passive_cpu_wait[i], 0);
+ atomic_dec(&passive_cpu_wait[i]);
/*
* Wait till all the CPUs have quit
* from the debugger.
*/
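
For readers who want to see the round-up protocol end to end, here is a small userspace model of the handshake these hunks touch: the master raises a per-CPU wait counter, waits for each passive CPU to announce itself, does its work, then releases the counters in reverse order and waits for the announcements to drain. This is only an illustration built on C11 <stdatomic.h> and pthreads; cpu_in_dbg, passive_wait, NCPUS and slave() are stand-ins of mine, not the kernel's cpu_in_kgdb[] and passive_cpu_wait[].

/* rendezvous_model.c - illustration only; build: cc -std=c11 -pthread rendezvous_model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

#define NCPUS 4

static atomic_int cpu_in_dbg[NCPUS];	/* stand-in for cpu_in_kgdb[]      */
static atomic_int passive_wait[NCPUS];	/* stand-in for passive_cpu_wait[] */

/* Passive side: announce entry, spin until released, announce exit. */
static void *slave(void *arg)
{
	int cpu = (int)(long)arg;

	/* Stands in for the round-up that makes this CPU enter the debugger. */
	while (!atomic_load(&passive_wait[cpu]))
		sched_yield();

	atomic_fetch_add(&cpu_in_dbg[cpu], 1);	/* "I am parked"       */
	while (atomic_load(&passive_wait[cpu]))	/* spin while held      */
		sched_yield();
	atomic_fetch_sub(&cpu_in_dbg[cpu], 1);	/* "I have left"       */
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	int i;

	for (i = 1; i < NCPUS; i++)
		pthread_create(&t[i], NULL, slave, (void *)(long)i);

	/* Master: park every other CPU and wait until each has arrived. */
	for (i = 1; i < NCPUS; i++)
		atomic_fetch_add(&passive_wait[i], 1);
	for (i = 1; i < NCPUS; i++)
		while (!atomic_load(&cpu_in_dbg[i]))
			sched_yield();

	puts("all passive CPUs parked; debugger work would run here");

	/* Release in reverse order, then wait till all have quit. */
	for (i = NCPUS - 1; i >= 1; i--)
		atomic_fetch_sub(&passive_wait[i], 1);
	for (i = 1; i < NCPUS; i++)
		while (atomic_load(&cpu_in_dbg[i]))
			sched_yield();

	for (i = 1; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}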
void kgdb_breakpoint(void)
{
- atomic_set(&kgdb_setting_breakpoint, 1);
+ atomic_inc(&kgdb_setting_breakpoint);
wmb(); /* Sync point before breakpoint */
arch_kgdb_breakpoint();
wmb(); /* Sync point after breakpoint */
- atomic_set(&kgdb_setting_breakpoint, 0);
+ atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
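
As a hedged usage sketch: kgdb_breakpoint() is the hook kernel code can call to trap into the debugger programmatically. The throwaway module below (the name kgdb_poke is hypothetical) calls it from its init path; it assumes a kernel built with CONFIG_KGDB and an I/O driver such as kgdboc already configured, and it must carry a GPL-compatible license because the symbol is exported with EXPORT_SYMBOL_GPL.

/* kgdb_poke.c - hypothetical test module, not part of the patch */
#include <linux/module.h>
#include <linux/kgdb.h>

static int __init kgdb_poke_init(void)
{
	pr_info("kgdb_poke: entering the debugger\n");
	kgdb_breakpoint();	/* execution stops here when kgdb is attached */
	return 0;
}

static void __exit kgdb_poke_exit(void)
{
}

module_init(kgdb_poke_init);
module_exit(kgdb_poke_exit);
MODULE_LICENSE("GPL");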