It acts exactly like a regular 'cond_resched()', but will not get
optimized away when CONFIG_PREEMPT is set.
Normal kernel code is already preemptable in the presence of
CONFIG_PREEMPT, so cond_resched() is optimized away (see commit
02b67cc3ba36bdba351d6c3a00593f4ec550d9d3 "sched: do not do
cond_resched() when CONFIG_PREEMPT").
But when wanting to conditionally reschedule while holding a lock, you
need to use "cond_resched_lock(lock)", and the new function is the BKL
equivalent of that.
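
As a sketch of the intended usage pattern (a hypothetical caller, not
part of this patch): a long scan done while holding the BKL can now
insert an explicit reschedule point.  schedule() already drops and
reacquires the BKL across a context switch, so the caller does not have
to release it by hand:

	#include <linux/sched.h>
	#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

	/* Hypothetical example: walk a long i_flock chain under the BKL. */
	static void scan_locks_under_bkl(struct inode *inode)
	{
		struct file_lock *fl;

		lock_kernel();
		for (fl = inode->i_flock; fl; fl = fl->fl_next)
			cond_resched_bkl();	/* may schedule here */
		unlock_kernel();
	}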
Also make fs/locks.c use it.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
* give it the opportunity to lock the file.
*/
	if (found)
-		cond_resched();
+		cond_resched_bkl();
find_conflict:
for_each_lock(inode, before) {
* cond_resched_lock() will drop the spinlock before scheduling,
* cond_resched_softirq() will enable bhs before scheduling.
*/
+extern int _cond_resched(void);
#ifdef CONFIG_PREEMPT
static inline int cond_resched(void)
{
	return 0;
}
#else
-extern int _cond_resched(void);
static inline int cond_resched(void)
{
	return _cond_resched();
}
#endif
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);
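+/*
+ * cond_resched_bkl() is the BKL counterpart of cond_resched_lock():
+ * unlike cond_resched(), it is not optimized away when CONFIG_PREEMPT
+ * is set, so BKL-holding code can still break up long latencies.
+ */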
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
/*
* Does a critical section need to be broken due to another
	} while (need_resched());
}
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
int __sched _cond_resched(void)
{
	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
					system_state == SYSTEM_RUNNING) {
		__cond_resched();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_cond_resched);
-#endif
/*
* cond_resched_lock() - if a reschedule is pending, drop the given lock,