diff options
| field | value | detail |
|---|---|---|
| author | Ingo Molnar <mingo@kernel.org> | 2012-10-24 10:20:57 +0200 |
| committer | Ingo Molnar <mingo@kernel.org> | 2012-10-24 10:20:57 +0200 |
| commit | ef8c029fa793423439e67ef0416b220d3fa3321a | (patch) |
| tree | 4199cefa6e1dcad1783040755246a14371f029af | /lib/spinlock_debug.c |
| parent | 6fcdb1ed2ef3548d5a9428d6ae60158ddd46a608 | (diff) |
| parent | c13d38e4a1fd5dd07135403c613c8091af444169 | (diff) |
Merge branch 'perf/urgent' into perf/core
Pick up v3.7-rc2 and fixes before applying more patches.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'lib/spinlock_debug.c')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | lib/spinlock_debug.c | 32 |

1 file changed, 18 insertions, 14 deletions
```diff
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 {
 	u64 i;
 	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
 
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_spin_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			spin_dump(lock, "lockup suspected");
+	for (i = 0; i < loops; i++) {
+		if (arch_spin_trylock(&lock->raw_lock))
+			return;
+		__delay(1);
+	}
+	/* lockup suspected: */
+	spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
-			trigger_all_cpu_backtrace();
+	trigger_all_cpu_backtrace();
 #endif
-		}
-	}
+
+	/*
+	 * The trylock above was causing a livelock.  Give the lower level arch
+	 * specific lock code a chance to acquire the lock. We have already
+	 * printed a warning/backtrace at this point. The non-debug arch
+	 * specific code might actually succeed in acquiring the lock.  If it is
+	 * not successful, the end-result is the same - there is no forward
+	 * progress.
+	 */
+	arch_spin_lock(&lock->raw_lock);
 }
 
 void do_raw_spin_lock(raw_spinlock_t *lock)
```
