*/
#define in_nmi() (preempt_count() & NMI_MASK)
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
-# include <linux/sched.h>
-# define PREEMPT_INATOMIC_BASE (current->lock_depth >= 0)
-#else
-# define PREEMPT_INATOMIC_BASE 0
-#endif
-
#if defined(CONFIG_PREEMPT)
# define PREEMPT_CHECK_OFFSET 1
#else
* used in the general case to determine whether sleeping is possible.
* Do not use in_atomic() in driver code.
*/
-#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
+#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
/*
* Check whether we were atomic before we did preempt_disable():
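
With CONFIG_BKL gone, in_atomic() no longer needs a base value to subtract: lock_kernel() disabled preemption underneath (the BKL is a spinlock), yet sleeping remained legal because the lock was dropped and retaken across schedule(), and PREEMPT_INATOMIC_BASE papered over that one extra count. The sketch below is purely illustrative (hypothetical function name, not part of this patch) and assumes a preemptible kernel.

#include <linux/hardirq.h>
#include <linux/spinlock.h>

/* Illustrative only; foo_atomicity_demo() is a made-up name. */
static void foo_atomicity_demo(spinlock_t *lock)
{
	WARN_ON(in_atomic());		/* outside any critical section: count is 0 */

	spin_lock(lock);		/* on CONFIG_PREEMPT this bumps preempt_count */
	WARN_ON(!in_atomic());		/* any nonzero count now simply means "atomic" */
	spin_unlock(lock);

	/*
	 * Before this patch, a task holding the BKL also had a nonzero
	 * preempt_count, and PREEMPT_INATOMIC_BASE excluded that single
	 * reference so in_atomic() would not report it as atomic.
	 */
}
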
--- a/include/linux/smp_lock.h
+++ /dev/null
-#ifndef __LINUX_SMPLOCK_H
-#define __LINUX_SMPLOCK_H
-
-#ifdef CONFIG_LOCK_KERNEL
-#include <linux/sched.h>
-
-extern int __lockfunc __reacquire_kernel_lock(void);
-extern void __lockfunc __release_kernel_lock(void);
-
-/*
- * Release/re-acquire global kernel lock for the scheduler
- */
-#define release_kernel_lock(tsk) do { \
- if (unlikely((tsk)->lock_depth >= 0)) \
- __release_kernel_lock(); \
-} while (0)
-
-static inline int reacquire_kernel_lock(struct task_struct *task)
-{
- if (unlikely(task->lock_depth >= 0))
- return __reacquire_kernel_lock();
- return 0;
-}
-
-extern void __lockfunc
-_lock_kernel(const char *func, const char *file, int line)
-__acquires(kernel_lock);
-
-extern void __lockfunc
-_unlock_kernel(const char *func, const char *file, int line)
-__releases(kernel_lock);
-
-#define lock_kernel() do { \
- _lock_kernel(__func__, __FILE__, __LINE__); \
-} while (0)
-
-#define unlock_kernel() do { \
- _unlock_kernel(__func__, __FILE__, __LINE__); \
-} while (0)
-
-/*
- * Various legacy drivers don't really need the BKL in a specific
- * function, but they *do* need to know that the BKL became available.
- * This function just avoids wrapping a bunch of lock/unlock pairs
- * around code which doesn't really need it.
- */
-static inline void cycle_kernel_lock(void)
-{
- lock_kernel();
- unlock_kernel();
-}
-
-#else
-
-#ifdef CONFIG_BKL /* provoke build bug if not set */
-#define lock_kernel()
-#define unlock_kernel()
-#define cycle_kernel_lock() do { } while(0)
-#endif /* CONFIG_BKL */
-
-#define release_kernel_lock(task) do { } while(0)
-#define reacquire_kernel_lock(task) 0
-
-#endif /* CONFIG_LOCK_KERNEL */
-#endif /* __LINUX_SMPLOCK_H */
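
The header above could only be deleted because no in-tree code calls lock_kernel()/unlock_kernel() any more. For code that still relies on them (out-of-tree drivers, unfinished conversions), the usual replacement is a lock private to the driver; the sketch below assumes a hypothetical foo driver and mutex and is not taken from this patch.

#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);			/* hypothetical per-driver lock */

/* formerly a BKL-protected .ioctl handler, now wired up as .unlocked_ioctl */
static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	long ret = 0;

	mutex_lock(&foo_lock);			/* was: lock_kernel()   */
	/* device-specific command handling would go here */
	mutex_unlock(&foo_lock);		/* was: unlock_kernel() */

	return ret;
}

Unlike the BKL, the mutex is not dropped over schedule(), so code that slept while holding lock_kernel() and relied on that implicit release deserves a closer look during conversion.
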
depends on BROKEN || !SMP
default y
-config LOCK_KERNEL
- bool
- depends on (SMP || PREEMPT) && BKL
- default y
-
config INIT_ENV_ARG_LIMIT
int
default 32 if !UML
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
-#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
rcu_note_context_switch(cpu);
prev = rq->curr;
- release_kernel_lock(prev);
-need_resched_nonpreemptible:
-
schedule_debug(prev);
if (sched_feat(HRTICK))
post_schedule(rq);
- if (unlikely(reacquire_kernel_lock(prev)))
- goto need_resched_nonpreemptible;
-
preempt_enable_no_resched();
if (need_resched())
goto need_resched;
{
int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
- return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
+ return (nested == preempt_offset);
}
void __might_sleep(const char *file, int line, int preempt_offset)
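
preempt_count_equals() now compares directly against the offset the caller passes in (0 for a plain might_sleep()), so any leftover preempt-disable or RCU read-side nesting trips the "BUG: sleeping function called from invalid context" check; there is no BKL allowance any more. Purely illustrative sketch with a hypothetical function name, effective only with CONFIG_PREEMPT and sleep-in-atomic debugging enabled:

#include <linux/kernel.h>
#include <linux/spinlock.h>

static void foo_context_demo(spinlock_t *lock)
{
	might_sleep();		/* ok: nested count is 0, matches preempt_offset 0 */

	spin_lock(lock);
	might_sleep();		/* warns: preempt_count is 1, expected offset is 0 */
	spin_unlock(lock);
}
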
This feature allows mutex semantics violations to be detected and
reported.
-config BKL
- bool "Big Kernel Lock" if (SMP || PREEMPT)
- default y
- help
- This is the traditional lock that is used in old code instead
- of proper locking. All drivers that use the BKL should depend
- on this symbol.
- Say Y here unless you are working on removing the BKL.
-
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
-obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
--- a/lib/kernel_lock.c
+++ /dev/null
-/*
- * lib/kernel_lock.c
- *
- * This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescence, but used by various less
- * important (or lazy) subsystems.
- */
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
-#include <linux/smp_lock.h>
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - do_raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
- while (!do_raw_spin_trylock(&kernel_flag)) {
- if (need_resched())
- return -EAGAIN;
- cpu_relax();
- }
- preempt_disable();
- return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
- do_raw_spin_unlock(&kernel_flag);
- preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * do_raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
- preempt_disable();
- if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
- /*
- * If preemption was disabled even before this
- * was called, there's nothing we can be polite
- * about - just spin.
- */
- if (preempt_count() > 1) {
- do_raw_spin_lock(&kernel_flag);
- return;
- }
-
- /*
- * Otherwise, let's wait for the kernel lock
- * with preemption enabled..
- */
- do {
- preempt_enable();
- while (raw_spin_is_locked(&kernel_flag))
- cpu_relax();
- preempt_disable();
- } while (!do_raw_spin_trylock(&kernel_flag));
- }
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
- do_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
- /*
- * the BKL is not covered by lockdep, so we open-code the
- * unlocking sequence (and thus avoid the dep-chain ops):
- */
- do_raw_spin_unlock(&kernel_flag);
- preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc _lock_kernel(const char *func, const char *file, int line)
-{
- int depth = current->lock_depth + 1;
-
- if (likely(!depth)) {
- might_sleep();
- __lock_kernel();
- }
- current->lock_depth = depth;
-}
-
-void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
-{
- BUG_ON(current->lock_depth < 0);
- if (likely(--current->lock_depth < 0))
- __unlock_kernel();
-}
-
-EXPORT_SYMBOL(_lock_kernel);
-EXPORT_SYMBOL(_unlock_kernel);
-
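
The deleted file is a compact summary of what made the BKL special: it nests per task via current->lock_depth, and the scheduler transparently drops and reacquires it, so sleeping while holding it was legal. A hypothetical before-the-patch usage sketch of those two semantics (it no longer builds once this file and smp_lock.h are gone):

#include <linux/delay.h>
#include <linux/smp_lock.h>	/* removed by this patch */

static void foo_legacy_section(void)
{
	lock_kernel();		/* lock_depth -1 -> 0: kernel_flag is taken    */
	lock_kernel();		/* lock_depth  0 -> 1: pure recursion, no spin */

	msleep(10);		/* legal: schedule() drops and retakes the BKL */

	unlock_kernel();	/* lock_depth  1 -> 0: still held              */
	unlock_kernel();	/* lock_depth  0 -> -1: kernel_flag released   */
}
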