BKL: That's all, folks
author		Arnd Bergmann <arnd@arndb.de>	Tue, 25 Jan 2011 21:52:22 +0000 (22:52 +0100)
committer	Arnd Bergmann <arnd@arndb.de>	Sat, 5 Mar 2011 09:56:00 +0000 (10:56 +0100)
This removes the implementation of the big kernel lock,
at last. A lot of people have worked on this in the
past, so the credit for this patch should go to
everyone who participated in the hunt.

The names on the Cc list are the people who were
most active in this, according to the recorded git
history, listed in alphabetical order.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Alan Cox <alan@linux.intel.com>
Cc: Alessio Igor Bogani <abogani@texware.it>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Hendry <andrew.hendry@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Hans Verkuil <hverkuil@xs4all.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Jan Blunck <jblunck@infradead.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Oliver Neukum <oliver@neukum.org>
Cc: Paul Menage <menage@google.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
include/linux/hardirq.h
include/linux/smp_lock.h [deleted file]
init/Kconfig
kernel/sched.c
lib/Kconfig.debug
lib/Makefile
lib/kernel_lock.c [deleted file]

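For context on what this removal meant for drivers: the conversion pattern used throughout the BKL hunt was to replace lock_kernel()/unlock_kernel() pairs with per-driver locking. A minimal sketch of that pattern, using an invented "foo" driver (foo_mutex, foo_do_ioctl, and foo_unlocked_ioctl are hypothetical names, not part of this patch):

	#include <linux/fs.h>
	#include <linux/mutex.h>

	/* Hypothetical "foo" driver; all names invented for the sketch. */
	static DEFINE_MUTEX(foo_mutex);

	static long foo_do_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
	{
		return 0;			/* stub device logic */
	}

	/*
	 * The old .ioctl entry point ran under an implicit
	 * lock_kernel()/unlock_kernel() pair; the conversion moves the
	 * handler to .unlocked_ioctl and serializes it with a private
	 * mutex instead.
	 */
	static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
				       unsigned long arg)
	{
		long ret;

		mutex_lock(&foo_mutex);		/* was: lock_kernel() */
		ret = foo_do_ioctl(file, cmd, arg);
		mutex_unlock(&foo_mutex);	/* was: unlock_kernel() */
		return ret;
	}
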
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 32f9fd6619b4160c997bba07e2661382f3afaacf..ba362171e8aeb75076570647ee4c1d65e84d3773 100644
  */
 #define in_nmi()       (preempt_count() & NMI_MASK)
 
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
-# include <linux/sched.h>
-# define PREEMPT_INATOMIC_BASE (current->lock_depth >= 0)
-#else
-# define PREEMPT_INATOMIC_BASE 0
-#endif
-
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_CHECK_OFFSET 1
 #else
  * used in the general case to determine whether sleeping is possible.
  * Do not use in_atomic() in driver code.
  */
-#define in_atomic()    ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
+#define in_atomic()    ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 /*
  * Check whether we were atomic before we did preempt_disable():
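The dropped PREEMPT_INATOMIC_BASE existed because a BKL holder carries an elevated preempt count (the lock is a spinlock underneath) yet is still allowed to sleep, since the BKL is transparently dropped over schedule(). With the lock gone, a nonzero preempt count always means atomic context, so the baseline is simply zero. A sketch of the kind of code the check protects (might_sleep() and wait_event() are real APIs; the foo names are invented):

	#include <linux/kernel.h>
	#include <linux/wait.h>

	struct foo_dev {			/* made-up device for the sketch */
		wait_queue_head_t wq;
		bool ready;
	};

	static void foo_wait_for_hw(struct foo_dev *dev)
	{
		might_sleep();	/* warns when called in atomic context */
		wait_event(dev->wq, dev->ready);
	}
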
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
deleted file mode 100644
index 3a19882..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __LINUX_SMPLOCK_H
-#define __LINUX_SMPLOCK_H
-
-#ifdef CONFIG_LOCK_KERNEL
-#include <linux/sched.h>
-
-extern int __lockfunc __reacquire_kernel_lock(void);
-extern void __lockfunc __release_kernel_lock(void);
-
-/*
- * Release/re-acquire global kernel lock for the scheduler
- */
-#define release_kernel_lock(tsk) do {          \
-       if (unlikely((tsk)->lock_depth >= 0))   \
-               __release_kernel_lock();        \
-} while (0)
-
-static inline int reacquire_kernel_lock(struct task_struct *task)
-{
-       if (unlikely(task->lock_depth >= 0))
-               return __reacquire_kernel_lock();
-       return 0;
-}
-
-extern void __lockfunc
-_lock_kernel(const char *func, const char *file, int line)
-__acquires(kernel_lock);
-
-extern void __lockfunc
-_unlock_kernel(const char *func, const char *file, int line)
-__releases(kernel_lock);
-
-#define lock_kernel() do {                                     \
-       _lock_kernel(__func__, __FILE__, __LINE__);             \
-} while (0)
-
-#define unlock_kernel()        do {                                    \
-       _unlock_kernel(__func__, __FILE__, __LINE__);           \
-} while (0)
-
-/*
- * Various legacy drivers don't really need the BKL in a specific
- * function, but they *do* need to know that the BKL became available.
- * This function just avoids wrapping a bunch of lock/unlock pairs
- * around code which doesn't really need it.
- */
-static inline void cycle_kernel_lock(void)
-{
-       lock_kernel();
-       unlock_kernel();
-}
-
-#else
-
-#ifdef CONFIG_BKL /* provoke build bug if not set */
-#define lock_kernel()
-#define unlock_kernel()
-#define cycle_kernel_lock()                    do { } while(0)
-#endif /* CONFIG_BKL */
-
-#define release_kernel_lock(task)              do { } while(0)
-#define reacquire_kernel_lock(task)            0
-
-#endif /* CONFIG_LOCK_KERNEL */
-#endif /* __LINUX_SMPLOCK_H */
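The cycle_kernel_lock() idiom deleted above — taking and immediately releasing the BKL, typically in open(), just to wait out a concurrent holder — has no drop-in replacement; converted drivers serialize open() against their own state instead. A hypothetical before/after, with all baz names invented:

	#include <linux/errno.h>
	#include <linux/fs.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(baz_mutex);		/* invented driver lock */

	static void *baz_find_dev(struct inode *inode)
	{
		return NULL;			/* stub lookup for the sketch */
	}

	static int baz_open(struct inode *inode, struct file *file)
	{
		/* before: cycle_kernel_lock(); -- wait out any BKL holder */
		mutex_lock(&baz_mutex);		/* after: driver's own lock */
		file->private_data = baz_find_dev(inode);
		mutex_unlock(&baz_mutex);
		return file->private_data ? 0 : -ENODEV;
	}
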
diff --git a/init/Kconfig b/init/Kconfig
index be788c0957d4abac813eef18abe7822177542969..a88d1c919a4d1b5ae441d750a616796f0a588234 100644
@@ -69,11 +69,6 @@ config BROKEN_ON_SMP
        depends on BROKEN || !SMP
        default y
 
-config LOCK_KERNEL
-       bool
-       depends on (SMP || PREEMPT) && BKL
-       default y
-
 config INIT_ENV_ARG_LIMIT
        int
        default 32 if !UML
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba249ba46079beab5a773700a7eb3b..827c170c601799d715749f6ee90d60f44d52f36b 100644
@@ -32,7 +32,6 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
-#include <linux/smp_lock.h>
 #include <asm/mmu_context.h>
 #include <linux/interrupt.h>
 #include <linux/capability.h>
@@ -3945,9 +3944,6 @@ need_resched:
        rcu_note_context_switch(cpu);
        prev = rq->curr;
 
-       release_kernel_lock(prev);
-need_resched_nonpreemptible:
-
        schedule_debug(prev);
 
        if (sched_feat(HRTICK))
@@ -4010,9 +4006,6 @@ need_resched_nonpreemptible:
 
        post_schedule(rq);
 
-       if (unlikely(reacquire_kernel_lock(prev)))
-               goto need_resched_nonpreemptible;
-
        preempt_enable_no_resched();
        if (need_resched())
                goto need_resched;
@@ -8074,7 +8067,7 @@ static inline int preempt_count_equals(int preempt_offset)
 {
        int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
 
-       return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
+       return (nested == preempt_offset);
 }
 
 void __might_sleep(const char *file, int line, int preempt_offset)
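These hunks delete the scheduler's half of the contract: schedule() used to release the BKL on entry and re-take it on the way out, looping back through need_resched_nonpreemptible when __reacquire_kernel_lock() lost the race, and __might_sleep() excused BKL holders accordingly. Code that relied on that automatic handoff now has to drop its own lock explicitly around anything that blocks. A minimal sketch of the explicit pattern, with invented bar names:

	#include <linux/mutex.h>
	#include <linux/wait.h>

	struct bar_dev {			/* made-up device for the sketch */
		struct mutex lock;
		wait_queue_head_t wq;
		bool busy;
	};

	static void bar_wait_idle(struct bar_dev *dev)
	{
		mutex_lock(&dev->lock);
		while (dev->busy) {
			mutex_unlock(&dev->lock);	/* explicit, not implicit */
			wait_event(dev->wq, !dev->busy);
			mutex_lock(&dev->lock);
		}
		/* dev->lock held and device idle here */
		mutex_unlock(&dev->lock);
	}
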
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2b97418c67e2f68f1ddf355cae1091ae1b3eafed..6f440d82b58db05e63d3a2ff9e32918e74974ebc 100644
@@ -470,15 +470,6 @@ config DEBUG_MUTEXES
         This feature allows mutex semantics violations to be detected and
         reported.
 
-config BKL
-       bool "Big Kernel Lock" if (SMP || PREEMPT)
-       default y
-       help
-         This is the traditional lock that is used in old code instead
-         of proper locking. All drivers that use the BKL should depend
-         on this symbol.
-         Say Y here unless you are working on removing the BKL.
-
 config DEBUG_LOCK_ALLOC
        bool "Lock debugging: detect incorrect freeing of live locks"
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index cbb774f7d41d09846732d9f674328e0c9cd05086..de6c609bb4e427c162f3dd2fac2d12e4da7e0f7b 100644
@@ -43,7 +43,6 @@ obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
-obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
deleted file mode 100644
index d80e122..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * lib/kernel_lock.c
- *
- * This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescence, but used by various less
- * important (or lazy) subsystems.
- */
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
-#include <linux/smp_lock.h>
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel().  It is transparently dropped and reacquired
- * over schedule().  It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static  __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - do_raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-       while (!do_raw_spin_trylock(&kernel_flag)) {
-               if (need_resched())
-                       return -EAGAIN;
-               cpu_relax();
-       }
-       preempt_disable();
-       return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-       do_raw_spin_unlock(&kernel_flag);
-       preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * do_raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-       preempt_disable();
-       if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
-               /*
-                * If preemption was disabled even before this
-                * was called, there's nothing we can be polite
-                * about - just spin.
-                */
-               if (preempt_count() > 1) {
-                       do_raw_spin_lock(&kernel_flag);
-                       return;
-               }
-
-               /*
-                * Otherwise, let's wait for the kernel lock
-                * with preemption enabled..
-                */
-               do {
-                       preempt_enable();
-                       while (raw_spin_is_locked(&kernel_flag))
-                               cpu_relax();
-                       preempt_disable();
-               } while (!do_raw_spin_trylock(&kernel_flag));
-       }
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-       do_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-       /*
-        * the BKL is not covered by lockdep, so we open-code the
-        * unlocking sequence (and thus avoid the dep-chain ops):
-        */
-       do_raw_spin_unlock(&kernel_flag);
-       preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc _lock_kernel(const char *func, const char *file, int line)
-{
-       int depth = current->lock_depth + 1;
-
-       if (likely(!depth)) {
-               might_sleep();
-               __lock_kernel();
-       }
-       current->lock_depth = depth;
-}
-
-void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
-{
-       BUG_ON(current->lock_depth < 0);
-       if (likely(--current->lock_depth < 0))
-               __unlock_kernel();
-}
-
-EXPORT_SYMBOL(_lock_kernel);
-EXPORT_SYMBOL(_unlock_kernel);
-
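For readers tracing the deleted logic: the core of this file is recursive ownership of a single global lock via task->lock_depth, which starts at -1 and makes nested lock_kernel() calls cheap no-ops. A stand-alone user-space model of just that counting rule (POSIX threads, invented toy_* names; an illustration, not kernel code):

	#include <pthread.h>

	static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
	static __thread int lock_depth = -1;	/* mirrors task->lock_depth */

	static void toy_lock_kernel(void)
	{
		if (++lock_depth == 0)		/* outermost acquisition only */
			pthread_mutex_lock(&big_lock);
	}

	static void toy_unlock_kernel(void)
	{
		if (--lock_depth < 0)		/* outermost release only */
			pthread_mutex_unlock(&big_lock);
	}

What the toy leaves out is the scheduler handoff: the real lock was transparently dropped and re-taken across schedule(), which is precisely the machinery removed from kernel/sched.c above.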