| author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-07-05 13:13:03 -0400 | 
|---|---|---|
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-07-05 13:13:03 -0400 | 
| commit | 5e66dd6d66ffe758b39b6dcadf2330753ee1159b (patch) | |
| tree | a72cdcff4448e4af9425cc213ddf56ab23e697fe /lib/rwsem-spinlock.c | |
| parent | 026477c1141b67e98e3bd8bdedb7d4b88a3ecd09 (diff) | |
| parent | ca78f6baca863afe2e6a244a0fe94b3a70211d46 (diff) | |
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'lib/rwsem-spinlock.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | lib/rwsem-spinlock.c | 66 |
1 file changed, 19 insertions, 47 deletions
```diff
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 40ffde940a86..db4fed74b940 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,27 +17,22 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid, str, sem->activity,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
 /*
@@ -56,8 +51,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	struct task_struct *tsk;
 	int woken;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
 	if (!wakewrite) {
@@ -104,7 +97,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	sem->activity += woken;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 }
 
@@ -138,8 +130,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_read");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +161,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_read");
+	;
 }
 
 /*
@@ -184,7 +173,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_read_trylock");
 
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -196,7 +184,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 	}
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
 }
 
@@ -204,13 +191,11 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_write");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +227,13 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_write");
+	;
+}
+
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
 }
 
 /*
@@ -255,8 +244,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_write_trylock");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +254,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -278,16 +264,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_read");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -297,8 +279,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
@@ -306,8 +286,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -318,8 +296,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __downgrade_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
@@ -327,18 +303,14 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 0);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
```
