Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                        127
-rw-r--r--  lib/Makefile                               3
-rw-r--r--  lib/debug_locks.c                         45
-rw-r--r--  lib/kernel_lock.c                          7
-rw-r--r--  lib/locking-selftest-hardirq.h             9
-rw-r--r--  lib/locking-selftest-mutex.h              11
-rw-r--r--  lib/locking-selftest-rlock-hardirq.h       2
-rw-r--r--  lib/locking-selftest-rlock-softirq.h       2
-rw-r--r--  lib/locking-selftest-rlock.h              14
-rw-r--r--  lib/locking-selftest-rsem.h               14
-rw-r--r--  lib/locking-selftest-softirq.h             9
-rw-r--r--  lib/locking-selftest-spin-hardirq.h        2
-rw-r--r--  lib/locking-selftest-spin-softirq.h        2
-rw-r--r--  lib/locking-selftest-spin.h               11
-rw-r--r--  lib/locking-selftest-wlock-hardirq.h       2
-rw-r--r--  lib/locking-selftest-wlock-softirq.h       2
-rw-r--r--  lib/locking-selftest-wlock.h              14
-rw-r--r--  lib/locking-selftest-wsem.h               14
-rw-r--r--  lib/locking-selftest.c                  1216
-rw-r--r--  lib/rwsem-spinlock.c                      66
-rw-r--r--  lib/rwsem.c                               51
-rw-r--r--  lib/spinlock_debug.c                      98
22 files changed, 1592 insertions, 129 deletions
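
Before the diff itself, one point is worth illustrating: the patch routes all lock-debugging facilities through a single global `debug_locks` flag, so only the first detected bug is reported and cascading follow-up reports are suppressed (see lib/debug_locks.c below). The following is a minimal user-space sketch of that report-once pattern; `my_lock_bug()` is a hypothetical checker standing in for the patch's spin_bug()/rwlock_bug(), and C11 `atomic_exchange` stands in for the kernel's xchg():

	#include <stdio.h>
	#include <stdatomic.h>

	/* Mirrors lib/debug_locks.c: one global flag gates all lock debugging. */
	static atomic_int debug_locks = 1;
	static int debug_locks_silent;	/* set by the selftest for quiet runs */

	/* Report-once gate: returns 1 only for the first, non-silent trip. */
	static int debug_locks_off(void)
	{
		/* atomic_exchange plays the role of the kernel's xchg() */
		if (atomic_exchange(&debug_locks, 0)) {
			if (!debug_locks_silent)
				return 1;	/* first failure: report it */
		}
		return 0;
	}

	/* Hypothetical checker in the style of spin_bug()/rwlock_bug(): */
	static void my_lock_bug(const char *msg)
	{
		if (!debug_locks_off())
			return;		/* already reported once: stay quiet */
		fprintf(stderr, "BUG: %s\n", msg);
	}

	int main(void)
	{
		my_lock_bug("recursion");	/* printed */
		my_lock_bug("bad magic");	/* suppressed follow-up */
		return 0;
	}
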
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4fcbd12cf6e..e5889b1a33ff 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -48,7 +48,7 @@ config DEBUG_KERNEL
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
 	range 12 21
-	default 17 if S390
+	default 17 if S390 || LOCKDEP
 	default 16 if X86_NUMAQ || IA64
 	default 15 if SMP
 	default 14
@@ -107,7 +107,7 @@ config DEBUG_SLAB_LEAK
 
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
-	depends on DEBUG_KERNEL && PREEMPT
+	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
 	default y
 	help
 	  If you say Y here then the kernel will use a debug variant of the
@@ -115,14 +115,6 @@ config DEBUG_PREEMPT
 	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
 	  will detect preemption count underflows.
 
-config DEBUG_MUTEXES
-	bool "Mutex debugging, deadlock detection"
-	default n
-	depends on DEBUG_KERNEL
-	help
-	 This allows mutex semantics violations and mutex related deadlocks
-	 (lockups) to be detected and reported automatically.
-
 config DEBUG_RT_MUTEXES
 	bool "RT Mutex debugging, deadlock detection"
 	depends on DEBUG_KERNEL && RT_MUTEXES
@@ -142,7 +134,7 @@ config RT_MUTEX_TESTER
 	  This option enables a rt-mutex tester.
 
 config DEBUG_SPINLOCK
-	bool "Spinlock debugging"
+	bool "Spinlock and rw-lock debugging: basic checks"
 	depends on DEBUG_KERNEL
 	help
 	  Say Y here and build SMP to catch missing spinlock initialization
@@ -150,13 +142,122 @@ config DEBUG_SPINLOCK
 	  best used in conjunction with the NMI watchdog so that spinlock
 	  deadlocks are also debuggable.
 
+config DEBUG_MUTEXES
+	bool "Mutex debugging: basic checks"
+	depends on DEBUG_KERNEL
+	help
+	 This feature allows mutex semantics violations to be detected and
+	 reported.
+
+config DEBUG_RWSEMS
+	bool "RW-sem debugging: basic checks"
+	depends on DEBUG_KERNEL
+	help
+	 This feature allows read-write semaphore semantics violations to
+	 be detected and reported.
+
+config DEBUG_LOCK_ALLOC
+	bool "Lock debugging: detect incorrect freeing of live locks"
+	depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	select DEBUG_SPINLOCK
+	select DEBUG_MUTEXES
+	select DEBUG_RWSEMS
+	select LOCKDEP
+	help
+	 This feature will check whether any held lock (spinlock, rwlock,
+	 mutex or rwsem) is incorrectly freed by the kernel, via any of the
+	 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
+	 vfree(), etc.), whether a live lock is incorrectly reinitialized via
+	 spin_lock_init()/mutex_init()/etc., or whether there is any lock
+	 held during task exit.
+
+config PROVE_LOCKING
+	bool "Lock debugging: prove locking correctness"
+	depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	select LOCKDEP
+	select DEBUG_SPINLOCK
+	select DEBUG_MUTEXES
+	select DEBUG_RWSEMS
+	select DEBUG_LOCK_ALLOC
+	default n
+	help
+	 This feature enables the kernel to prove that all locking
+	 that occurs in the kernel runtime is mathematically
+	 correct: that under no circumstance could an arbitrary (and
+	 not yet triggered) combination of observed locking
+	 sequences (on an arbitrary number of CPUs, running an
+	 arbitrary number of tasks and interrupt contexts) cause a
+	 deadlock.
+
+	 In short, this feature enables the kernel to report locking
+	 related deadlocks before they actually occur.
+
+	 The proof does not depend on how hard and complex a
+	 deadlock scenario would be to trigger: how many
+	 participant CPUs, tasks and irq-contexts would be needed
+	 for it to trigger. The proof also does not depend on
+	 timing: if a race and a resulting deadlock is possible
+	 theoretically (no matter how unlikely the race scenario
+	 is), it will be proven so and will immediately be
+	 reported by the kernel (once the event is observed that
+	 makes the deadlock theoretically possible).
+
+	 If a deadlock is impossible (i.e. the locking rules, as
+	 observed by the kernel, are mathematically correct), the
+	 kernel reports nothing.
+
+	 NOTE: this feature can also be enabled for rwlocks, mutexes
+	 and rwsems - in which case all dependencies between these
+	 different locking variants are observed and mapped too, and
+	 the proof of observed correctness is also maintained for an
+	 arbitrary combination of these separate locking variants.
+
+	 For more details, see Documentation/lockdep-design.txt.
+
+config LOCKDEP
+	bool
+	depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	select STACKTRACE
+	select FRAME_POINTER
+	select KALLSYMS
+	select KALLSYMS_ALL
+
+config DEBUG_LOCKDEP
+	bool "Lock dependency engine debugging"
+	depends on LOCKDEP
+	help
+	  If you say Y here, the lock dependency engine will do
+	  additional runtime checks to debug itself, at the price
+	  of more runtime overhead.
+
+config TRACE_IRQFLAGS
+	bool
+	default y
+	depends on TRACE_IRQFLAGS_SUPPORT
+	depends on PROVE_LOCKING
+
 config DEBUG_SPINLOCK_SLEEP
-	bool "Sleep-inside-spinlock checking"
+	bool "Spinlock debugging: sleep-inside-spinlock checking"
 	depends on DEBUG_KERNEL
 	help
 	  If you say Y here, various routines which may sleep will become very
 	  noisy if they are called with a spinlock held.
 
+config DEBUG_LOCKING_API_SELFTESTS
+	bool "Locking API boot-time self-tests"
+	depends on DEBUG_KERNEL
+	help
+	  Say Y here if you want the kernel to run a short self-test during
+	  bootup. The self-test checks whether common types of locking bugs
+	  are detected by debugging mechanisms or not. (If you disable
+	  lock debugging then those bugs won't be detected, of course.)
+	  The following locking APIs are covered: spinlocks, rwlocks,
+	  mutexes and rwsems.
+
+config STACKTRACE
+	bool
+	depends on STACKTRACE_SUPPORT
+
 config DEBUG_KOBJECT
 	bool "kobject debugging"
 	depends on DEBUG_KERNEL
@@ -212,7 +313,7 @@ config DEBUG_VM
 
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
-	depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML)
+	depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390)
 	default y if DEBUG_INFO && UML
 	help
 	  If you say Y here the resulting kernel image will be slightly larger
diff --git a/lib/Makefile b/lib/Makefile
index 10c13c9d7824..be9719ae82d0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,13 +11,14 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o halfmd4.o iomap_copy.o
+obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
+obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
new file mode 100644
index 000000000000..0ef01d14727c
--- /dev/null
+++ b/lib/debug_locks.c
@@ -0,0 +1,45 @@
+/*
+ * lib/debug_locks.c
+ *
+ * Generic place for common debugging facilities for various locks:
+ * spinlocks, rwlocks, mutexes and rwsems.
+ *
+ * Started by Ingo Molnar:
+ *
+ *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/debug_locks.h>
+
+/*
+ * We want to turn all lock-debugging facilities on/off at once,
+ * via a global flag. The reason is that once a single bug has been
+ * detected and reported, there might be a cascade of followup bugs
+ * that would just muddy the log. So we report the first one and
+ * shut up after that.
+ */
+int debug_locks = 1;
+
+/*
+ * The locking-testsuite uses <debug_locks_silent> to get a
+ * 'silent failure': nothing is printed to the console when
+ * a locking bug is detected.
+ */
+int debug_locks_silent;
+
+/*
+ * Generic 'turn off all lock debugging' function:
+ */
+int debug_locks_off(void)
+{
+	if (xchg(&debug_locks, 0)) {
+		if (!debug_locks_silent) {
+			console_verbose();
+			return 1;
+		}
+	}
+	return 0;
+}
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index e713e86811ae..e0fdfddb406e 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -177,7 +177,12 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
-	spin_unlock(&kernel_flag);
+	/*
+	 * the BKL is not covered by lockdep, so we open-code the
+	 * unlocking sequence (and thus avoid the dep-chain ops):
+	 */
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable();
 }
 
 /*
diff --git a/lib/locking-selftest-hardirq.h b/lib/locking-selftest-hardirq.h
new file mode 100644
index 000000000000..10d4a150b259
--- /dev/null
+++ b/lib/locking-selftest-hardirq.h
@@ -0,0 +1,9 @@
+#undef IRQ_DISABLE
+#undef IRQ_ENABLE
+#undef IRQ_ENTER
+#undef IRQ_EXIT
+
+#define IRQ_ENABLE		HARDIRQ_ENABLE
+#define IRQ_DISABLE		HARDIRQ_DISABLE
+#define IRQ_ENTER		HARDIRQ_ENTER
+#define IRQ_EXIT		HARDIRQ_EXIT
diff --git a/lib/locking-selftest-mutex.h b/lib/locking-selftest-mutex.h
new file mode 100644
index 000000000000..68601b6f584b
--- /dev/null
+++ b/lib/locking-selftest-mutex.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK		ML
+
+#undef UNLOCK
+#define UNLOCK		MU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT		MI
diff --git a/lib/locking-selftest-rlock-hardirq.h b/lib/locking-selftest-rlock-hardirq.h
new file mode 100644
index 000000000000..9f517ebcb786
--- /dev/null
+++ b/lib/locking-selftest-rlock-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-rlock.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-rlock-softirq.h b/lib/locking-selftest-rlock-softirq.h
new file mode 100644
index 000000000000..981455db7ff0
--- /dev/null
+++ b/lib/locking-selftest-rlock-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-rlock.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-rlock.h b/lib/locking-selftest-rlock.h
new file mode 100644
index 000000000000..6789044f4d0e
--- /dev/null
+++ b/lib/locking-selftest-rlock.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK		RL
+
+#undef UNLOCK
+#define UNLOCK		RU
+
+#undef RLOCK
+#define RLOCK		RL
+
+#undef WLOCK
+#define WLOCK		WL
+
+#undef INIT
+#define INIT		RWI
diff --git a/lib/locking-selftest-rsem.h b/lib/locking-selftest-rsem.h
new file mode 100644
index 000000000000..62da886680c7
--- /dev/null
+++ b/lib/locking-selftest-rsem.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK		RSL
+
+#undef UNLOCK
+#define UNLOCK		RSU
+
+#undef RLOCK
+#define RLOCK		RSL
+
+#undef WLOCK
+#define WLOCK		WSL
+
+#undef INIT
+#define INIT		RWSI
diff --git a/lib/locking-selftest-softirq.h b/lib/locking-selftest-softirq.h
new file mode 100644
index 000000000000..a83de2a04ace
--- /dev/null
+++ b/lib/locking-selftest-softirq.h
@@ -0,0 +1,9 @@
+#undef IRQ_DISABLE
+#undef IRQ_ENABLE
+#undef IRQ_ENTER
+#undef IRQ_EXIT
+
+#define IRQ_DISABLE		SOFTIRQ_DISABLE
+#define IRQ_ENABLE		SOFTIRQ_ENABLE
+#define IRQ_ENTER		SOFTIRQ_ENTER
+#define IRQ_EXIT		SOFTIRQ_EXIT
diff --git a/lib/locking-selftest-spin-hardirq.h b/lib/locking-selftest-spin-hardirq.h
new file mode 100644
index 000000000000..693198dce30a
--- /dev/null
+++ b/lib/locking-selftest-spin-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-spin.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-spin-softirq.h b/lib/locking-selftest-spin-softirq.h
new file mode 100644
index 000000000000..c472e2a87ffc
--- /dev/null
+++ b/lib/locking-selftest-spin-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-spin.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-spin.h b/lib/locking-selftest-spin.h
new file mode 100644
index 000000000000..ccd1b4b09757
--- /dev/null
+++ b/lib/locking-selftest-spin.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK		L
+
+#undef UNLOCK
+#define UNLOCK		U
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT		SI
diff --git a/lib/locking-selftest-wlock-hardirq.h b/lib/locking-selftest-wlock-hardirq.h
new file mode 100644
index 000000000000..2dd2e5122caa
--- /dev/null
+++ b/lib/locking-selftest-wlock-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-wlock.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-wlock-softirq.h b/lib/locking-selftest-wlock-softirq.h
new file mode 100644
index 000000000000..cb80d1cb944e
--- /dev/null
+++ b/lib/locking-selftest-wlock-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-wlock.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-wlock.h b/lib/locking-selftest-wlock.h
new file mode 100644
index 000000000000..0815322d99ed
--- /dev/null
+++ b/lib/locking-selftest-wlock.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK		WL
+
+#undef UNLOCK
+#define UNLOCK		WU
+
+#undef RLOCK
+#define RLOCK		RL
+
+#undef WLOCK
+#define WLOCK		WL
+
+#undef INIT
+#define INIT		RWI
diff --git a/lib/locking-selftest-wsem.h b/lib/locking-selftest-wsem.h
new file mode 100644
index 000000000000..b88c5f2dc5f0
--- /dev/null
+++ b/lib/locking-selftest-wsem.h
@@ -0,0 +1,14 @@
+#undef LOCK
+#define LOCK		WSL
+
+#undef UNLOCK
+#define UNLOCK		WSU
+
+#undef RLOCK
+#define RLOCK		RSL
+
+#undef WLOCK
+#define WLOCK		WSL
+
+#undef INIT
+#define INIT		RWSI
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
new file mode 100644
index 000000000000..7945787f439a
--- /dev/null
+++ b/lib/locking-selftest.c
@@ -0,0 +1,1216 @@
+/*
+ * lib/locking-selftest.c
+ *
+ * Testsuite for various locking APIs: spinlocks, rwlocks,
+ * mutexes and rw-semaphores.
+ *
+ * It checks both false positives and false negatives.
+ *
+ * Started by Ingo Molnar:
+ *
+ *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/lockdep.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/debug_locks.h>
+#include <linux/irqflags.h>
+
+/*
+ * Change this to 1 if you want to see the failure printouts:
+ */
+static unsigned int debug_locks_verbose;
+
+static int __init setup_debug_locks_verbose(char *str)
+{
+	get_option(&str, &debug_locks_verbose);
+
+	return 1;
+}
+
+__setup("debug_locks_verbose=", setup_debug_locks_verbose);
+
+#define FAILURE		0
+#define SUCCESS		1
+
+#define LOCKTYPE_SPIN	0x1
+#define LOCKTYPE_RWLOCK	0x2
+#define LOCKTYPE_MUTEX	0x4
+#define LOCKTYPE_RWSEM	0x8
+
+/*
+ * Normal standalone locks, for the circular and irq-context
+ * dependency tests:
+ */
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
+
+static DEFINE_RWLOCK(rwlock_A);
+static DEFINE_RWLOCK(rwlock_B);
+static DEFINE_RWLOCK(rwlock_C);
+static DEFINE_RWLOCK(rwlock_D);
+
+static DEFINE_MUTEX(mutex_A);
+static DEFINE_MUTEX(mutex_B);
+static DEFINE_MUTEX(mutex_C);
+static DEFINE_MUTEX(mutex_D);
+
+static DECLARE_RWSEM(rwsem_A);
+static DECLARE_RWSEM(rwsem_B);
+static DECLARE_RWSEM(rwsem_C);
+static DECLARE_RWSEM(rwsem_D);
+
+/*
+ * Locks that we initialize dynamically as well so that
+ * e.g. X1 and X2 become two instances of the same class,
+ * but X* and Y* are different classes. We do this so that
+ * we do not trigger a real lockup:
+ */
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
+
+static DEFINE_RWLOCK(rwlock_X1);
+static DEFINE_RWLOCK(rwlock_X2);
+static DEFINE_RWLOCK(rwlock_Y1);
+static DEFINE_RWLOCK(rwlock_Y2);
+static DEFINE_RWLOCK(rwlock_Z1);
+static DEFINE_RWLOCK(rwlock_Z2);
+
+static DEFINE_MUTEX(mutex_X1);
+static DEFINE_MUTEX(mutex_X2);
+static DEFINE_MUTEX(mutex_Y1);
+static DEFINE_MUTEX(mutex_Y2);
+static DEFINE_MUTEX(mutex_Z1);
+static DEFINE_MUTEX(mutex_Z2);
+
+static DECLARE_RWSEM(rwsem_X1);
+static DECLARE_RWSEM(rwsem_X2);
+static DECLARE_RWSEM(rwsem_Y1);
+static DECLARE_RWSEM(rwsem_Y2);
+static DECLARE_RWSEM(rwsem_Z1);
+static DECLARE_RWSEM(rwsem_Z2);
+
+/*
+ * non-inlined runtime initializers, to let separate locks share
+ * the same lock-class:
+ */
+#define INIT_CLASS_FUNC(class) 				\
+static noinline void					\
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
+		 struct rw_semaphore *rwsem)		\
+{							\
+	spin_lock_init(lock);				\
+	rwlock_init(rwlock);				\
+	mutex_init(mutex);				\
+	init_rwsem(rwsem);				\
+}
+
+INIT_CLASS_FUNC(X)
+INIT_CLASS_FUNC(Y)
+INIT_CLASS_FUNC(Z)
+
+static void init_shared_classes(void)
+{
+	init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
+	init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
+
+	init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1);
+	init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2);
+
+	init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1);
+	init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2);
+}
+
+/*
+ * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests.
+ * The following functions use a lock from a simulated hardirq/softirq
+ * context, causing the locks to be marked as hardirq-safe/softirq-safe:
+ */
+
+#define HARDIRQ_DISABLE		local_irq_disable
+#define HARDIRQ_ENABLE		local_irq_enable
+
+#define HARDIRQ_ENTER()				\
+	local_irq_disable();			\
+	irq_enter();				\
+	WARN_ON(!in_irq());
+
+#define HARDIRQ_EXIT()				\
+	__irq_exit();				\
+	local_irq_enable();
+
+#define SOFTIRQ_DISABLE		local_bh_disable
+#define SOFTIRQ_ENABLE		local_bh_enable
+
+#define SOFTIRQ_ENTER()				\
+		local_bh_disable();		\
+		local_irq_disable();		\
+		trace_softirq_enter();		\
+		WARN_ON(!in_softirq());
+
+#define SOFTIRQ_EXIT()				\
+		trace_softirq_exit();		\
+		local_irq_enable();		\
+		local_bh_enable();
+
+/*
+ * Shortcuts for lock/unlock API variants, to keep
+ * the testcases compact:
+ */
+#define L(x)			spin_lock(&lock_##x)
+#define U(x)			spin_unlock(&lock_##x)
+#define LU(x)			L(x); U(x)
+#define SI(x)			spin_lock_init(&lock_##x)
+
+#define WL(x)			write_lock(&rwlock_##x)
+#define WU(x)			write_unlock(&rwlock_##x)
+#define WLU(x)			WL(x); WU(x)
+
+#define RL(x)			read_lock(&rwlock_##x)
+#define RU(x)			read_unlock(&rwlock_##x)
+#define RLU(x)			RL(x); RU(x)
+#define RWI(x)			rwlock_init(&rwlock_##x)
+
+#define ML(x)			mutex_lock(&mutex_##x)
+#define MU(x)			mutex_unlock(&mutex_##x)
+#define MI(x)			mutex_init(&mutex_##x)
+
+#define WSL(x)			down_write(&rwsem_##x)
+#define WSU(x)			up_write(&rwsem_##x)
+
+#define RSL(x)			down_read(&rwsem_##x)
+#define RSU(x)			up_read(&rwsem_##x)
+#define RWSI(x)			init_rwsem(&rwsem_##x)
+
+#define LOCK_UNLOCK_2(x,y)	LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
+
+/*
+ * Generate different permutations of the same testcase, using
+ * the same basic lock-dependency/state events:
+ */
+
+#define GENERATE_TESTCASE(name)			\
+						\
+static void name(void) { E(); }
+
+#define GENERATE_PERMUTATIONS_2_EVENTS(name)	\
+						\
+static void name##_12(void) { E1(); E2(); }	\
+static void name##_21(void) { E2(); E1(); }
+
+#define GENERATE_PERMUTATIONS_3_EVENTS(name)		\
+							\
+static void name##_123(void) { E1(); E2(); E3(); }	\
+static void name##_132(void) { E1(); E3(); E2(); }	\
+static void name##_213(void) { E2(); E1(); E3(); }	\
+static void name##_231(void) { E2(); E3(); E1(); }	\
+static void name##_312(void) { E3(); E1(); E2(); }	\
+static void name##_321(void) { E3(); E2(); E1(); }
+
+/*
+ * AA deadlock:
+ */
+
+#define E()					\
+						\
+	LOCK(X1);				\
+	LOCK(X2); /* this one should fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(AA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(AA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(AA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(AA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(AA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(AA_rsem)
+
+#undef E
+
+/*
+ * Special case for read-locking: read-locks are
+ * allowed to recurse on the same lock class:
+ */
+static void rlock_AA1(void)
+{
+	RL(X1);
+	RL(X1); // this one should NOT fail
+}
+
+static void rlock_AA1B(void)
+{
+	RL(X1);
+	RL(X2); // this one should NOT fail
+}
+
+static void rsem_AA1(void)
+{
+	RSL(X1);
+	RSL(X1); // this one should fail
+}
+
+static void rsem_AA1B(void)
+{
+	RSL(X1);
+	RSL(X2); // this one should fail
+}
+/*
+ * The mixing of read and write locks is not allowed:
+ */
+static void rlock_AA2(void)
+{
+	RL(X1);
+	WL(X2); // this one should fail
+}
+
+static void rsem_AA2(void)
+{
+	RSL(X1);
+	WSL(X2); // this one should fail
+}
+
+static void rlock_AA3(void)
+{
+	WL(X1);
+	RL(X2); // this one should fail
+}
+
+static void rsem_AA3(void)
+{
+	WSL(X1);
+	RSL(X2); // this one should fail
+}
+
+/*
+ * ABBA deadlock:
+ */
+
+#define E()					\
+						\
+	LOCK_UNLOCK_2(A, B);			\
+	LOCK_UNLOCK_2(B, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBA_rsem)
+
+#undef E
+
+/*
+ * AB BC CA deadlock:
+ */
+
+#define E()					\
+						\
+	LOCK_UNLOCK_2(A, B);			\
+	LOCK_UNLOCK_2(B, C);			\
+	LOCK_UNLOCK_2(C, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBCCA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBCCA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBCCA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBCCA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBCCA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBCCA_rsem)
+
+#undef E
+
+/*
+ * AB CA BC deadlock:
+ */
+
+#define E()					\
+						\
+	LOCK_UNLOCK_2(A, B);			\
+	LOCK_UNLOCK_2(C, A);			\
+	LOCK_UNLOCK_2(B, C); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCABC_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCABC_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCABC_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCABC_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCABC_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCABC_rsem)
+
+#undef E
+
+/*
+ * AB BC CD DA deadlock:
+ */
+
+#define E()					\
+						\
+	LOCK_UNLOCK_2(A, B);			\
+	LOCK_UNLOCK_2(B, C);			\
+	LOCK_UNLOCK_2(C, D);			\
+	LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBCCDDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBCCDDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBCCDDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBCCDDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBCCDDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBCCDDA_rsem)
+
+#undef E
+
+/*
+ * AB CD BD DA deadlock:
+ */
+#define E()					\
+						\
+	LOCK_UNLOCK_2(A, B);			\
+	LOCK_UNLOCK_2(C, D);			\
+	LOCK_UNLOCK_2(B, D);			\
+	LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCDBDDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCDBDDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCDBDDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCDBDDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCDBDDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCDBDDA_rsem)
+
+#undef E
+
+/*
+ * AB CD BC DA deadlock:
+ */
+#define E()					\
+						\
+	LOCK_UNLOCK_2(A, B);			\
+	LOCK_UNLOCK_2(C, D);			\
+	LOCK_UNLOCK_2(B, C);			\
+	LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCDBCDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCDBCDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCDBCDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCDBCDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCDBCDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCDBCDA_rsem)
+
+#undef E
+
+/*
+ * Double unlock:
+ */
+#define E()					\
+						\
+	LOCK(A);				\
+	UNLOCK(A);				\
+	UNLOCK(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(double_unlock_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(double_unlock_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(double_unlock_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(double_unlock_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(double_unlock_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(double_unlock_rsem)
+
+#undef E
+
+/*
+ * Bad unlock ordering:
+ */
+#define E()					\
+						\
+	LOCK(A);				\
+	LOCK(B);				\
+	UNLOCK(A); /* fail */			\
+	UNLOCK(B);
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(bad_unlock_order_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(bad_unlock_order_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(bad_unlock_order_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(bad_unlock_order_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(bad_unlock_order_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(bad_unlock_order_rsem)
+
+#undef E
+
+/*
+ * initializing a held lock:
+ */
+#define E()					\
+						\
+	LOCK(A);				\
+	INIT(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(init_held_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(init_held_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(init_held_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(init_held_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(init_held_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(init_held_rsem)
+
+#undef E
+
+/*
+ * locking an irq-safe lock with irqs enabled:
+ */
+#define E1()				\
+					\
+	IRQ_ENTER();			\
+	LOCK(A);			\
+	UNLOCK(A);			\
+	IRQ_EXIT();
+
+#define E2()				\
+					\
+	LOCK(A);			\
+	UNLOCK(A);
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Enabling hardirqs with a softirq-safe lock held:
+ */
+#define E1()				\
+					\
+	SOFTIRQ_ENTER();		\
+	LOCK(A);			\
+	UNLOCK(A);			\
+	SOFTIRQ_EXIT();
+
+#define E2()				\
+					\
+	HARDIRQ_DISABLE();		\
+	LOCK(A);			\
+	HARDIRQ_ENABLE();		\
+	UNLOCK(A);
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock)
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Enabling irqs with an irq-safe lock held:
+ */
+#define E1()				\
+					\
+	IRQ_ENTER();			\
+	LOCK(A);			\
+	UNLOCK(A);			\
+	IRQ_EXIT();
+
+#define E2()				\
+					\
+	IRQ_DISABLE();			\
+	LOCK(A);			\
+	IRQ_ENABLE();			\
+	UNLOCK(A);
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+
+#undef E1
+#undef E2
+
+/*
+ * Acquiring an irq-unsafe lock while holding an irq-safe-lock:
+ */
+#define E1()				\
+					\
+	LOCK(A);			\
+	LOCK(B);			\
+	UNLOCK(B);			\
+	UNLOCK(A);			\
+
+#define E2()				\
+					\
+	LOCK(B);			\
+	UNLOCK(B);
+
+#define E3()				\
+					\
+	IRQ_ENTER();			\
+	LOCK(A);			\
+	UNLOCK(A);			\
+	IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * If a lock turns into softirq-safe, but earlier it took
+ * a softirq-unsafe lock:
+ */
+
+#define E1()				\
+	IRQ_DISABLE();			\
+	LOCK(A);			\
+	LOCK(B);			\
+	UNLOCK(B);			\
+	UNLOCK(A);			\
+	IRQ_ENABLE();
+
+#define E2()				\
+	LOCK(B);			\
+	UNLOCK(B);
+
+#define E3()				\
+	IRQ_ENTER();			\
+	LOCK(A);			\
+	UNLOCK(A);			\
+	IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock irq inversion.
+ *
+ * Deadlock scenario:
+ *
+ * CPU#1 is at #1, i.e. it has write-locked A, but has not
+ * taken B yet.
+ *
+ * CPU#2 is at #2, i.e. it has locked B.
+ *
+ * Hardirq hits CPU#2 at point #2 and is trying to read-lock A.
+ *
+ * The deadlock occurs because CPU#1 will spin on B, and CPU#2
+ * will spin on A.
+ */
+
+#define E1()				\
+					\
+	IRQ_DISABLE();			\
+	WL(A);				\
+	LOCK(B);			\
+	UNLOCK(B);			\
+	WU(A);				\
+	IRQ_ENABLE();
+
+#define E2()				\
+					\
+	LOCK(B);			\
+	UNLOCK(B);
+
+#define E3()				\
+					\
+	IRQ_ENTER();			\
+	RL(A);				\
+	RU(A);				\
+	IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
+
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock recursion that is actually safe.
+ */
+
+#define E1()				\
+					\
+	IRQ_DISABLE();			\
+	WL(A);				\
+	WU(A);				\
+	IRQ_ENABLE();
+
+#define E2()				\
+					\
+	RL(A);				\
+	RU(A);				\
+
+#define E3()				\
+					\
+	IRQ_ENTER();			\
+	RL(A);				\
+	L(B);				\
+	U(B);				\
+	RU(A);				\
+	IRQ_EXIT();
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard)
+
+#include "locking-selftest-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock recursion that is unsafe.
+ */
+
+#define E1()				\
+					\
+	IRQ_DISABLE();			\
+	L(B);				\
+	WL(A);				\
+	WU(A);				\
+	U(B);				\
+	IRQ_ENABLE();
+
+#define E2()				\
+					\
+	RL(A);				\
+	RU(A);				\
+
+#define E3()				\
+					\
+	IRQ_ENTER();			\
+	L(B);				\
+	U(B);				\
+	IRQ_EXIT();
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard)
+
+#include "locking-selftest-softirq.h"
+// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define I_SPINLOCK(x)	lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RWLOCK(x)	lockdep_reset_lock(&rwlock_##x.dep_map)
+# define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
+# define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
+#else
+# define I_SPINLOCK(x)
+# define I_RWLOCK(x)
+# define I_MUTEX(x)
+# define I_RWSEM(x)
+#endif
+
+#define I1(x)					\
+	do {					\
+		I_SPINLOCK(x);			\
+		I_RWLOCK(x);			\
+		I_MUTEX(x);			\
+		I_RWSEM(x);			\
+	} while (0)
+
+#define I2(x)					\
+	do {					\
+		spin_lock_init(&lock_##x);	\
+		rwlock_init(&rwlock_##x);	\
+		mutex_init(&mutex_##x);		\
+		init_rwsem(&rwsem_##x);		\
+	} while (0)
+
+static void reset_locks(void)
+{
+	local_irq_disable();
+	I1(A); I1(B); I1(C); I1(D);
+	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
+	lockdep_reset();
+	I2(A); I2(B); I2(C); I2(D);
+	init_shared_classes();
+	local_irq_enable();
+}
+
+#undef I
+
+static int testcase_total;
+static int testcase_successes;
+static int expected_testcase_failures;
+static int unexpected_testcase_failures;
+
+static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
+{
+	unsigned long saved_preempt_count = preempt_count();
+	int expected_failure = 0;
+
+	WARN_ON(irqs_disabled());
+
+	testcase_fn();
+	/*
+	 * Filter out expected failures:
+	 */
+#ifndef CONFIG_PROVE_LOCKING
+	if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
+		expected_failure = 1;
+	if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
+		expected_failure = 1;
+	if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
+		expected_failure = 1;
+	if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
+		expected_failure = 1;
+#endif
+	if (debug_locks != expected) {
+		if (expected_failure) {
+			expected_testcase_failures++;
+			printk("failed|");
+		} else {
+			unexpected_testcase_failures++;
+			printk("FAILED|");
+		}
+	} else {
+		testcase_successes++;
+		printk("  ok  |");
+	}
+	testcase_total++;
+
+	if (debug_locks_verbose)
+		printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
+			lockclass_mask, debug_locks, expected);
+	/*
+	 * Some tests (e.g. double-unlock) might corrupt the preemption
+	 * count, so restore it:
+	 */
+	preempt_count() = saved_preempt_count;
+#ifdef CONFIG_TRACE_IRQFLAGS
+	if (softirq_count())
+		current->softirqs_enabled = 0;
+	else
+		current->softirqs_enabled = 1;
+#endif
+
+	reset_locks();
+}
+
+static inline void print_testname(const char *testname)
+{
+	printk("%33s:", testname);
+}
+
+#define DO_TESTCASE_1(desc, name, nr)				\
+	print_testname(desc"/"#nr);				\
+	dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK);		\
+	printk("\n");
+
+#define DO_TESTCASE_1B(desc, name, nr)				\
+	print_testname(desc"/"#nr);				\
+	dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK);		\
+	printk("\n");
+
+#define DO_TESTCASE_3(desc, name, nr)				\
+	print_testname(desc"/"#nr);				\
+	dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN);	\
+	dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK);	\
+	dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK);	\
+	printk("\n");
+
+#define DO_TESTCASE_3RW(desc, name, nr)				\
+	print_testname(desc"/"#nr);				\
+	dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
+	dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK);	\
+	dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK);	\
+	printk("\n");
+
+#define DO_TESTCASE_6(desc, name)				\
+	print_testname(desc);					\
+	dotest(name##_spin, FAILURE, LOCKTYPE_SPIN);		\
+	dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK);		\
+	dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK);		\
+	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
+	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
+	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
+	printk("\n");
+
+#define DO_TESTCASE_6_SUCCESS(desc, name)			\
+	print_testname(desc);					\
+	dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN);		\
+	dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK);		\
+	dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK);		\
+	dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX);		\
+	dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM);		\
+	dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM);		\
+	printk("\n");
+
+/*
+ * 'read' variant: rlocks must not trigger.
+ */
+#define DO_TESTCASE_6R(desc, name)				\
+	print_testname(desc);					\
+	dotest(name##_spin, FAILURE, LOCKTYPE_SPIN);		\
+	dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK);		\
+	dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK);		\
+	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
+	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
+	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
+	printk("\n");
+
+#define DO_TESTCASE_2I(desc, name, nr)				\
+	DO_TESTCASE_1("hard-"desc, name##_hard, nr);		\
+	DO_TESTCASE_1("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_2IB(desc, name, nr)				\
+	DO_TESTCASE_1B("hard-"desc, name##_hard, nr);		\
+	DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_6I(desc, name, nr)				\
+	DO_TESTCASE_3("hard-"desc, name##_hard, nr);		\
+	DO_TESTCASE_3("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_6IRW(desc, name, nr)			\
+	DO_TESTCASE_3RW("hard-"desc, name##_hard, nr);		\
+	DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
+
+#define DO_TESTCASE_2x3(desc, name)				\
+	DO_TESTCASE_3(desc, name, 12);				\
+	DO_TESTCASE_3(desc, name, 21);
+
+#define DO_TESTCASE_2x6(desc, name)				\
+	DO_TESTCASE_6I(desc, name, 12);				\
+	DO_TESTCASE_6I(desc, name, 21);
+
+#define DO_TESTCASE_6x2(desc, name)				\
+	DO_TESTCASE_2I(desc, name, 123);			\
+	DO_TESTCASE_2I(desc, name, 132);			\
+	DO_TESTCASE_2I(desc, name, 213);			\
+	DO_TESTCASE_2I(desc, name, 231);			\
+	DO_TESTCASE_2I(desc, name, 312);			\
+	DO_TESTCASE_2I(desc, name, 321);
+
+#define DO_TESTCASE_6x2B(desc, name)				\
+	DO_TESTCASE_2IB(desc, name, 123);			\
+	DO_TESTCASE_2IB(desc, name, 132);			\
+	DO_TESTCASE_2IB(desc, name, 213);			\
+	DO_TESTCASE_2IB(desc, name, 231);			\
+	DO_TESTCASE_2IB(desc, name, 312);			\
+	DO_TESTCASE_2IB(desc, name, 321);
+
+#define DO_TESTCASE_6x6(desc, name)				\
+	DO_TESTCASE_6I(desc, name, 123);			\
+	DO_TESTCASE_6I(desc, name, 132);			\
+	DO_TESTCASE_6I(desc, name, 213);			\
+	DO_TESTCASE_6I(desc, name, 231);			\
+	DO_TESTCASE_6I(desc, name, 312);			\
+	DO_TESTCASE_6I(desc, name, 321);
+
+#define DO_TESTCASE_6x6RW(desc, name)				\
+	DO_TESTCASE_6IRW(desc, name, 123);			\
+	DO_TESTCASE_6IRW(desc, name, 132);			\
+	DO_TESTCASE_6IRW(desc, name, 213);			\
+	DO_TESTCASE_6IRW(desc, name, 231);			\
+	DO_TESTCASE_6IRW(desc, name, 312);			\
+	DO_TESTCASE_6IRW(desc, name, 321);
+
+
+void locking_selftest(void)
+{
+	/*
+	 * Got a locking failure before the selftest ran?
+	 */
+	if (!debug_locks) {
+		printk("----------------------------------\n");
+		printk("| Locking API testsuite disabled |\n");
+		printk("----------------------------------\n");
+		return;
+	}
+
+	/*
+	 * Run the testsuite:
+	 */
+	printk("------------------------\n");
+	printk("| Locking API testsuite:\n");
+	printk("----------------------------------------------------------------------------\n");
+	printk("                                 | spin |wlock |rlock |mutex | wsem | rsem |\n");
+	printk("  --------------------------------------------------------------------------\n");
+
+	init_shared_classes();
+	debug_locks_silent = !debug_locks_verbose;
+
+	DO_TESTCASE_6R("A-A deadlock", AA);
+	DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
+	DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA);
+	DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC);
+	DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA);
+	DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
+	DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
+	DO_TESTCASE_6("double unlock", double_unlock);
+	DO_TESTCASE_6("initialize held", init_held);
+	DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);
+
+	printk("  --------------------------------------------------------------------------\n");
+	print_testname("recursive read-lock");
+	printk("             |");
+	dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
+	printk("             |");
+	dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
+	printk("\n");
+
+	print_testname("recursive read-lock #2");
+	printk("             |");
+	dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
+	printk("             |");
+	dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
+	printk("\n");
+
+	print_testname("mixed read-write-lock");
+	printk("             |");
+	dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
+	printk("             |");
+	dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
+	printk("\n");
+
+	print_testname("mixed write-read-lock");
+	printk("             |");
+	dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
+	printk("             |");
+	dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
+	printk("\n");
+
+	printk("  --------------------------------------------------------------------------\n");
+
+	/*
+	 * irq-context testcases:
+	 */
+	DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
+	DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
+	DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
+	DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
+	DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
+	DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
+
+	DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
+//	DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
+
+	if (unexpected_testcase_failures) {
+		printk("-----------------------------------------------------------------\n");
+		debug_locks = 0;
+		printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
+			unexpected_testcase_failures, testcase_total);
+		printk("-----------------------------------------------------------------\n");
+	} else if (expected_testcase_failures && testcase_successes) {
+		printk("--------------------------------------------------------\n");
+		printk("%3d out of %3d testcases failed, as expected. |\n",
+			expected_testcase_failures, testcase_total);
+		printk("----------------------------------------------------\n");
+		debug_locks = 1;
+	} else if (expected_testcase_failures && !testcase_successes) {
+		printk("--------------------------------------------------------\n");
+		printk("All %3d testcases failed, as expected. |\n",
+			expected_testcase_failures);
+		printk("----------------------------------------\n");
+		debug_locks = 1;
+	} else {
+		printk("-------------------------------------------------------\n");
+		printk("Good, all %3d testcases passed! |\n",
+			testcase_successes);
+		printk("---------------------------------\n");
+		debug_locks = 1;
+	}
+	debug_locks_silent = 0;
+}
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 40ffde940a86..db4fed74b940 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,27 +17,22 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid, str, sem->activity,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
 /*
@@ -56,8 +51,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	struct task_struct *tsk;
 	int woken;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
 	if (!wakewrite) {
@@ -104,7 +97,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	sem->activity += woken;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 }
 
@@ -138,8 +130,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_read");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +161,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_read");
+	;
 }
 
 /*
@@ -184,7 +173,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_read_trylock");
 
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -196,7 +184,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
 }
 
@@ -204,13 +191,11 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_write");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +227,13 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_write");
+	;
+}
+
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
 }
 
 /*
@@ -255,8 +244,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_write_trylock");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +254,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -278,16 +264,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_read");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -297,8 +279,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
@@ -306,8 +286,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -318,8 +296,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __downgrade_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
@@ -327,18 +303,14 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 0);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 62fa4eba9ffe..b322421c2969 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -8,6 +8,26 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
+/*
+ * Initialize an rwsem:
+ */
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+}
+
+EXPORT_SYMBOL(__init_rwsem);
+
 struct rwsem_waiter {
 	struct list_head list;
 	struct task_struct *task;
@@ -16,17 +36,6 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-#undef rwsemtrace
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	printk("sem=%p\n", sem);
-	printk("(sem)=%08lx\n", sem->count);
-	if (sem->debug)
-		printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
-}
-#endif
-
 /*
  * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then:
@@ -45,8 +54,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	struct list_head *next;
 	signed long oldcount, woken, loop;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	if (downgrading)
 		goto dont_wake_writers;
 
@@ -127,7 +134,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	next->prev = &sem->wait_list;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 
 	/* undo the change to count, but check for a transition 1->0 */
@@ -186,13 +192,9 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem, "Entering rwsem_down_read_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_READ;
 	rwsem_down_failed_common(sem, &waiter,
 				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
-
-	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
 	return sem;
 }
 
@@ -204,12 +206,9 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem, "Entering rwsem_down_write_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
 	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
 
-	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
 	return sem;
 }
 
@@ -221,8 +220,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering rwsem_wake");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
@@ -231,8 +228,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving rwsem_wake");
-
 	return sem;
 }
 
@@ -245,8 +240,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering rwsem_downgrade_wake");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
@@ -255,7 +248,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
 	return sem;
 }
 
@@ -263,6 +255,3 @@ EXPORT_SYMBOL(rwsem_down_read_failed);
 EXPORT_SYMBOL(rwsem_down_write_failed);
 EXPORT_SYMBOL(rwsem_wake);
 EXPORT_SYMBOL(rwsem_downgrade_wake);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 93c15ee3f8ea..3d9c4dc965ed 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -8,38 +8,71 @@
 
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 #include <linux/delay.h>
+#include <linux/module.h>
+
+void __spin_lock_init(spinlock_t *lock, const char *name,
+		      struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
+	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->magic = SPINLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+		   struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
+	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+	lock->magic = RWLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__rwlock_init);
 
 static void spin_bug(spinlock_t *lock, const char *msg)
 {
-	static long print_once = 1;
 	struct task_struct *owner = NULL;
 
-	if (xchg(&print_once, 0)) {
-		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
-			owner = lock->owner;
-		printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
-			msg, raw_smp_processor_id(),
-			current->comm, current->pid);
-		printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
-				".owner_cpu: %d\n",
-			lock, lock->magic,
-			owner ? owner->comm : "<none>",
-			owner ? owner->pid : -1,
-			lock->owner_cpu);
-		dump_stack();
-#ifdef CONFIG_SMP
-		/*
-		 * We cannot continue on SMP:
-		 */
-//		panic("bad locking");
-#endif
-	}
+	if (!debug_locks_off())
+		return;
+
+	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
+		owner = lock->owner;
+	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
+		msg, raw_smp_processor_id(),
+		current->comm, current->pid);
+	printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+			".owner_cpu: %d\n",
+		lock, lock->magic,
+		owner ? owner->comm : "<none>",
+		owner ? owner->pid : -1,
+		lock->owner_cpu);
+	dump_stack();
 }
 
 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
 
-static inline void debug_spin_lock_before(spinlock_t *lock)
+static inline void
+debug_spin_lock_before(spinlock_t *lock)
 {
 	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
 	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -118,20 +151,13 @@ void _raw_spin_unlock(spinlock_t *lock)
 
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
-	static long print_once = 1;
-
-	if (xchg(&print_once, 0)) {
-		printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
-			msg, raw_smp_processor_id(), current->comm,
-			current->pid, lock);
-		dump_stack();
-#ifdef CONFIG_SMP
-		/*
-		 * We cannot continue on SMP:
-		 */
-		panic("bad locking");
-#endif
-	}
+	if (!debug_locks_off())
+		return;
+
+	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+		msg, raw_smp_processor_id(), current->comm,
+		current->pid, lock);
+	dump_stack();
 }
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
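
For readers tracing the macro layering in lib/locking-selftest.c above, here is roughly what one generated testcase boils down to after preprocessing. This is an illustrative expansion, not code from the patch: including locking-selftest-spin.h maps `LOCK` to the `L()` shortcut, i.e. `spin_lock(&lock_##x)`, so `GENERATE_TESTCASE(AA_spin)` applied to the AA `E()` event produces:

	/* Illustrative preprocessor expansion of GENERATE_TESTCASE(AA_spin): */
	static void AA_spin(void)
	{
		spin_lock(&lock_X1);
		spin_lock(&lock_X2);	/* X1/X2 share a class: AA deadlock */
	}

dotest() then runs this with failure expected (`dotest(AA_spin, FAILURE, LOCKTYPE_SPIN)`, via `DO_TESTCASE_6R("A-A deadlock", AA)`), checks whether `debug_locks` dropped to 0 as it should, and calls reset_locks() so the next permutation starts from a clean dependency state.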

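Finally, on why __init_rwsem() (and likewise __spin_lock_init()/__rwlock_init()) gains `name` and `key` parameters: each static initialization site is meant to supply its own `struct lock_class_key`, which is what lets lockdep assign every init site a distinct lock class. The wrapper lives in the matching include/ changes of the same patch series, not in this lib/-only diff; the sketch below is reconstructed from that header change and should be read as an assumption about its exact shape:

	/* Header-side counterpart (assumed, from include/linux/rwsem.h): */
	#define init_rwsem(sem)						\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_rwsem((sem), #sem, &__key);			\
	} while (0)

Because `__key` is static and unique per call site, all rwsems initialized at the same place share one class, while rwsems initialized elsewhere get another; this is the same mechanism the selftest exploits with init_class_X/Y/Z() above to make X1 and X2 class-mates without being the same lock.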