author		Michel Lespinasse <walken@google.com>	2010-08-09 17:21:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-09 20:45:10 -0700
commit		345af7bf3304410634c21ada4664fda83d4d9a16 (patch)
tree		1fd8b49ef79796e561cce19794af371099f5e7ca /lib
parent		7721fea3d0fd93fb4d000eb737b444369358d6d3 (diff)
rwsem: fully separate code paths to wake writers vs readers
This is in preparation for later changes in the series.

In __rwsem_do_wake(), the first queued waiter is checked first in order to
determine whether it's a writer or a reader. The code paths diverge at this
point. The code that checks and increments the rwsem active count is
duplicated on both sides - the point is that later changes in the series
will be able to independently modify both sides.

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: David Howells <dhowells@redhat.com>
Cc: Mike Waychison <mikew@google.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Ying Han <yinghan@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
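To make the shape of the change easier to see, here is a simplified sketch of the control flow that __rwsem_do_wake() ends up with after this patch. It is an illustration only, with both paths elided to comments; the actual code is in the diff below.

/* Simplified sketch of the reworked __rwsem_do_wake() control flow
 * (illustrative only; the real code is in lib/rwsem.c).
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
	struct rwsem_waiter *waiter;

	/* Inspect the first queued waiter up front and branch on its type,
	 * so the writer and reader paths no longer share any code. */
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	/* Writer path: its own copy of the "transition the active count
	 * from 0 to 1" check, then wake the single writer at the head. */
	/* ... try_again_write / undo_write ... */
	goto out;

 readers_only:
	/* Reader path: an independent copy of the same active-count check,
	 * then wake every reader queued at the front in one batch. */
	/* ... try_again_read / wake_readers / undo_read ... */
 out:
	return sem;
}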
Diffstat (limited to 'lib')
-rw-r--r--	lib/rwsem.c	61
1 file changed, 34 insertions(+), 27 deletions(-)
diff --git a/lib/rwsem.c b/lib/rwsem.c
index ceba8e28807..917fd946b49 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -41,7 +41,7 @@ struct rwsem_waiter {
  * - if we come here from up_xxxx(), then:
  *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
  *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
- *   - there must be someone on the queue
+ * - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if downgrading is false
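The 'active part' and 'waiting part' referred to in the comment above map onto the rwsem count bias constants. For reference, these are the 32-bit values from the arch rwsem headers of this era (64-bit configurations use wider fields); they are reproduced here only to make the 0x0000ffff / 0xffff0000 split concrete:

#define RWSEM_UNLOCKED_VALUE	0x00000000	/* count when nothing holds or waits */
#define RWSEM_ACTIVE_BIAS	0x00000001	/* one active reader or writer */
#define RWSEM_ACTIVE_MASK	0x0000ffff	/* the 'active part' of count */
#define RWSEM_WAITING_BIAS	(-0x00010000)	/* makes the 'waiting part' negative */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

Every currently active reader or writer contributes RWSEM_ACTIVE_BIAS to the low half, while queued waiters drive the upper half negative via RWSEM_WAITING_BIAS, which is what the precondition comment above relies on.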
@@ -54,26 +54,23 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	struct list_head *next;
 	signed long oldcount, woken, loop;
 
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
+		goto readers_only;
+
 	if (downgrading)
-		goto dont_wake_writers;
+		goto out;
 
-	/* if we came through an up_xxxx() call, we only only wake someone up
-	 * if we can transition the active part of the count from 0 -> 1
+	/* There's a writer at the front of the queue - try to grant it the
+	 * write lock. However, we only wake this writer if we can transition
+	 * the active part of the count from 0 -> 1
 	 */
- try_again:
+ try_again_write:
 	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
 						- RWSEM_ACTIVE_BIAS;
 	if (oldcount & RWSEM_ACTIVE_MASK)
-		goto undo;
-
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-
-	/* try to grant a single write lock if there's a writer at the front
-	 * of the queue - note we leave the 'active part' of the count
-	 * incremented by 1 and the waiting part incremented by 0x00010000
-	 */
-	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
-		goto readers_only;
+		/* Someone grabbed the sem already */
+		goto undo_write;
 
 	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
 	 * It is an allocated on the waiter's stack and may become invalid at
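The trailing context lines above lead into the wakeup sequence that both paths rely on. Roughly, that sequence reads the task pointer and orders the NULL store before the wakeup, because the rwsem_waiter structure lives on the sleeping task's stack and may disappear as soon as ->task is cleared. A sketch of the surrounding code, not part of this patch:

	list_del(&waiter->list);
	tsk = waiter->task;
	smp_mb();		/* complete the unlink and the read of ->task first */
	waiter->task = NULL;	/* from here on, 'waiter' may already be gone */
	wake_up_process(tsk);
	put_task_struct(tsk);
	goto out;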
@@ -87,18 +84,24 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	put_task_struct(tsk);
 	goto out;
 
-	/* don't want to wake any writers */
- dont_wake_writers:
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
-		goto out;
+ readers_only:
+	if (downgrading)
+		goto wake_readers;
+
+	/* if we came through an up_xxxx() call, we only only wake someone up
+	 * if we can transition the active part of the count from 0 -> 1 */
+ try_again_read:
+	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
+						- RWSEM_ACTIVE_BIAS;
+	if (oldcount & RWSEM_ACTIVE_MASK)
+		/* Someone grabbed the sem already */
+		goto undo_read;
 
-	/* grant an infinite number of read locks to the readers at the front
-	 * of the queue
-	 * - note we increment the 'active part' of the count by the number of
-	 *   readers before waking any processes up
+ wake_readers:
+	/* Grant an infinite number of read locks to the readers at the front
+	 * of the queue. Note we increment the 'active part' of the count by
+	 * the number of readers before waking any processes up.
 	 */
- readers_only:
 	woken = 0;
 	do {
 		woken++;
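Conceptually, the wake_readers path that the loop above belongs to does what this simplified sketch shows. It is illustrative only; the real loop also folds the waiting-part adjustment for each woken reader into the same atomic update:

	/* Count the consecutive readers queued at the front of the list and
	 * charge all of them to the 'active part' of the count before any of
	 * them is actually woken, so the count already reflects them by the
	 * time any of them starts running. */
	woken = 0;
	list_for_each_entry(waiter, &sem->wait_list, list) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			break;
		woken++;
	}
	rwsem_atomic_add(woken * RWSEM_ACTIVE_BIAS, sem);
	/* ...then wake those 'woken' readers and unlink them from the list. */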
@@ -138,10 +141,14 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 
 	/* undo the change to the active count, but check for a transition
 	 * 1->0 */
- undo:
+ undo_write:
+	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
+		goto out;
+	goto try_again_write;
+ undo_read:
 	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
 		goto out;
-	goto try_again;
+	goto try_again_read;
 }
 
 /*
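For completeness, the undo_write and undo_read blocks introduced above follow the same back-out-and-retry pattern; in rough terms, a paraphrase of the patch rather than additional kernel code:

	/* Back out the speculative RWSEM_ACTIVE_BIAS added by try_again_*.
	 * If some other task is still active it will call __rwsem_do_wake()
	 * itself when it releases the lock, so we can simply give up here;
	 * if the active count dropped back to zero we must retry the grant
	 * ourselves, as nobody else will wake the queued waiters. */
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
		goto out;
	goto try_again_write;	/* or try_again_read on the reader side */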