*/
struct cpu_stop_done {
atomic_t nr_todo; /* nr left to execute */
- bool executed; /* actually executed? */
int ret; /* collected return value */
struct completion completion; /* fired if nr_todo reaches 0 */
};
}
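/*
 * Illustrative sketch, not part of the patch: the waiter side is assumed
 * to initialize the structure above roughly like this (modelled on the
 * file's cpu_stop_init_done() helper).  With ->executed gone, ->ret and
 * the completion carry everything a waiter needs to know.
 */
static void init_done_sketch(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));		/* ->ret stays 0 unless a callback fails */
	atomic_set(&done->nr_todo, nr_todo);	/* one count per queued work */
	init_completion(&done->completion);	/* completed by cpu_stop_signal_done() */
}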
/* signal completion unless @done is NULL */
-static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
+static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
if (done) {
- if (executed)
- done->executed = true;
if (atomic_dec_and_test(&done->nr_todo))
complete(&done->completion);
}
if (enabled)
__cpu_stop_queue_work(stopper, work);
else
- cpu_stop_signal_done(work->done, false);
+ cpu_stop_signal_done(work->done);
spin_unlock_irqrestore(&stopper->lock, flags);
return enabled;
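/*
 * Illustrative sketch, not part of the patch: @done can legitimately be
 * NULL in the path above, because fire-and-forget callers (in the style
 * of stop_one_cpu_nowait()) queue a work that nobody waits on.  The
 * callback and variable names below are made up.
 */
static int nowait_cb(void *arg)
{
	return 0;	/* must not sleep; runs in the stopper thread */
}

static struct cpu_stop_work nowait_work = {
	.fn	= nowait_cb,
	.arg	= NULL,
	.done	= NULL,		/* nobody waits, so nothing is signalled */
};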
if (!cpu_stop_queue_work(cpu, &work))
return -ENOENT;
wait_for_completion(&done.completion);
- WARN_ON(!done.executed);
return done.ret;
}
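/*
 * Illustrative sketch, not part of the patch: with ->executed gone, a
 * caller of stop_one_cpu() only needs the return value.  The callback
 * and wrapper names are made up.
 */
static int example_cb(void *arg)
{
	/* runs on the target CPU with preemption disabled; must not sleep */
	return 0;
}

static int example_call(unsigned int cpu)
{
	/* -ENOENT when the target CPU's stopper is not enabled */
	return stop_one_cpu(cpu, example_cb, NULL);
}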
return -ENOENT;
wait_for_completion(&done.completion);
- WARN_ON(!done.executed);
return done.ret;
}
if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
return -ENOENT;
wait_for_completion(&done.completion);
- WARN_ON(!done.executed);
return done.ret;
}
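/*
 * Illustrative sketch, not part of the patch: the hunk above omits the
 * setup half of the multi-CPU path.  A single cpu_stop_done is assumed
 * to be shared by every queued work, so nr_todo starts at the number of
 * target CPUs and the completion fires only when the last stopper
 * signals it.  The wrapper name is made up; the helpers are the file's.
 */
static int stop_cpus_sketch(const struct cpumask *cpumask,
			    cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));	/* nr_todo = nr of target CPUs */
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}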
ret = fn(arg);
if (ret && done)
done->ret = ret;
+ cpu_stop_signal_done(done);
/* restore preemption and check it's still balanced */
preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);
- cpu_stop_signal_done(done, true);
goto repeat;
}
}