/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
					
						
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

 | 
					
						
							| 
									
										
										
										
											2006-07-03 00:24:53 -07:00
/*
 * Initialize an rwsem:
 *
 * @sem:  the semaphore to initialise
 * @name: lock name used by lockdep for reporting
 * @key:  lockdep class key for this semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	/* no readers or writers, empty wait queue */
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
/* One queued waiter; lives on the sleeping task's stack (see
 * rwsem_down_failed_common()), so it must not be touched after
 * ->task has been cleared by the waker.
 */
struct rwsem_waiter {
	struct list_head list;		/* entry in sem->wait_list */
	struct task_struct *task;	/* sleeper; zeroed just before wakeup */
	unsigned int flags;		/* one of the values below */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-08-09 17:21:17 -07:00
/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
 * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
 * since the rwsem value was observed.
 */
#define RWSEM_WAKE_ANY        0 /* Wake whatever's at head of wait list */
#define RWSEM_WAKE_NO_ACTIVE  1 /* rwsem was observed with no active thread */
#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	if (wake_type == RWSEM_WAKE_READ_OWNED)
		/* Another active reader was observed, so wakeup is not
		 * likely to succeed. Save the atomic op.
		 */
		goto out;

	/* There's a writer at the front of the queue - try to grant it the
	 * write lock.  However, we only wake this writer if we can transition
	 * the active part of the count from 0 -> 1
	 */
	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
	if (waiter->list.next == &sem->wait_list)
		/* this writer is the only waiter: drop the waiting bias too */
		adjustment -= RWSEM_WAITING_BIAS;

 try_again_write:
	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
	if (oldcount & RWSEM_ACTIVE_MASK)
		/* Someone grabbed the sem already */
		goto undo_write;

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del(&waiter->list);
	tsk = waiter->task;
	/* order the read of ->task above against the NULL store below, which
	 * publishes the grant to the sleeper in rwsem_down_failed_common() */
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	goto out;

 readers_only:
	/* If we come here from up_xxxx(), another thread might have reached
	 * rwsem_down_failed_common() before we acquired the spinlock and
	 * woken up a waiter, making it now active.  We prefer to check for
	 * this first in order to not spend too much time with the spinlock
	 * held if we're not going to be able to wake up readers in the end.
	 *
	 * Note that we do not need to update the rwsem count: any writer
	 * trying to acquire rwsem will run rwsem_down_write_failed() due
	 * to the waiting threads and block trying to acquire the spinlock.
	 *
	 * We use a dummy atomic update in order to acquire the cache line
	 * exclusively since we expect to succeed and run the final rwsem
	 * count adjustment pretty soon.
	 */
	if (wake_type == RWSEM_WAKE_ANY &&
	    rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
		/* Someone grabbed the sem for write already */
		goto out;

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
	if (waiter->flags & RWSEM_WAITING_FOR_READ)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	rwsem_atomic_add(adjustment, sem);

	/* now actually wake the 'woken' readers counted above */
	next = sem->wait_list.next;
	for (loop = woken; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		/* same publication ordering as the writer-grant path above */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}

	/* splice all the woken waiters off the front of the list at once */
	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;

	/* undo the change to the active count, but check for a transition
	 * 1->0 */
 undo_write:
	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
		goto out;
	goto try_again_write;
}
					
						
							|  |  |  | 
 | 
					
						
/*
 * wait for a lock to be granted
 *
 * @sem:        the semaphore being waited on
 * @flags:      RWSEM_WAITING_FOR_READ or RWSEM_WAITING_FOR_WRITE
 * @adjustment: count delta that backs out the failed fast-path bias
 *              (-RWSEM_ACTIVE_READ_BIAS or -RWSEM_ACTIVE_WRITE_BIAS)
 *
 * Sleeps uninterruptibly until __rwsem_do_wake() grants the lock by
 * clearing waiter.task.
 */
static struct rw_semaphore __sched *
rwsem_down_failed_common(struct rw_semaphore *sem,
			 unsigned int flags, signed long adjustment)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);
	waiter.task = tsk;
	waiter.flags = flags;
	/* hold a task reference for the waker; dropped in __rwsem_do_wake() */
	get_task_struct(tsk);

	/* only the first waiter contributes the waiting bias to count */
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es) up.
	 *
	 * Alternatively, if we're called from a failed down_write(), there
	 * were already threads queued before us and there are no active
	 * writers, the lock must be read owned; so we try to wake any read
	 * locks that were queued ahead of us. */
	if (count == RWSEM_WAITING_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
	else if (count > RWSEM_WAITING_BIAS &&
		 adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock: __rwsem_do_wake() signals the grant by
	 * NULLing waiter.task (after an smp_mb() on its side) */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}
					
						
							|  |  |  | 
 | 
					
						
/*
 * wait for the read lock to be granted
 * - backs out the active-read bias applied by the failed fast path
 *   while queueing (see rwsem_down_failed_common())
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
					-RWSEM_ACTIVE_READ_BIAS);
}
					
						
							|  |  |  | 
 | 
					
						
/*
 * wait for the write lock to be granted
 * - backs out the active-write bias applied by the failed fast path
 *   while queueing (see rwsem_down_failed_common())
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
					-RWSEM_ACTIVE_WRITE_BIAS);
}
					
						
							|  |  |  | 
 | 
					
						
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 * - may be called from IRQ context, hence the irqsave locking
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
					
						
							|  |  |  | 
 | 
					
						
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 * - RWSEM_WAKE_READ_OWNED: the lock is still held (now for read), so
 *   __rwsem_do_wake() will not wake a queued writer
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);