From: Linus Torvalds
Date: Sun, 11 May 2008 23:04:48 +0000 (-0700)
Subject: Add new 'cond_resched_bkl()' helper function
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=c3921ab71507b108d51a0f1ee960f80cd668a93d;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

Add new 'cond_resched_bkl()' helper function

It acts exactly like a regular 'cond_resched()', but will not get
optimized away when CONFIG_PREEMPT is set.

Normal kernel code is already preemptable in the presence of
CONFIG_PREEMPT, so cond_resched() is optimized away (see commit
02b67cc3ba36bdba351d6c3a00593f4ec550d9d3 "sched: do not do
cond_resched() when CONFIG_PREEMPT").

But when you want to conditionally reschedule while holding a lock, you
need to use "cond_resched_lock(lock)", and the new function is the BKL
equivalent of that.

Also make fs/locks.c use it.

Signed-off-by: Linus Torvalds
---

diff --git a/fs/locks.c b/fs/locks.c
index 0ac6b92cb0b6..11dbf08651b7 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -773,7 +773,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	 * give it the opportunity to lock the file.
 	 */
 	if (found)
-		cond_resched();
+		cond_resched_bkl();
 
 find_conflict:
 	for_each_lock(inode, before) {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0c35b0343a76..4ab9f32f9238 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2037,13 +2037,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
+extern int _cond_resched(void);
 #ifdef CONFIG_PREEMPT
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
@@ -2051,6 +2051,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another
diff --git a/kernel/sched.c b/kernel/sched.c
index c51b6565e07c..8841a915545d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5525,7 +5525,6 @@ static void __cond_resched(void)
 	} while (need_resched());
 }
 
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 int __sched _cond_resched(void)
 {
 	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
@@ -5536,7 +5535,6 @@ int __sched _cond_resched(void)
 	return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
-#endif
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
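
As an illustration only (not part of the commit above; the loop and its
helper are hypothetical), a long-running code path that holds the BKL via
lock_kernel() could yield like this.  Unlike plain cond_resched(), the new
call is not compiled away on CONFIG_PREEMPT kernels, and schedule() saves
and restores the BKL depth across the context switch, so it is safe to
reschedule here:

#include <linux/sched.h>
#include <linux/smp_lock.h>

/* Hypothetical unit of work, standing in for real per-item processing. */
static void process_one_item(int i)
{
	(void)i;
}

static void bkl_heavy_loop(void)
{
	int i;

	lock_kernel();
	for (i = 0; i < 1024; i++) {
		process_one_item(i);
		/*
		 * Not optimized away even with CONFIG_PREEMPT; if we do
		 * sleep, the BKL is dropped and re-taken by schedule().
		 */
		cond_resched_bkl();
	}
	unlock_kernel();
}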