summaryrefslogtreecommitdiff
path: root/ipc/sem.c
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2009-12-15 16:47:28 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2009-12-16 07:20:09 -0800
commitbf17bb717759d50a2733a7a8157a7c4a25d93abc (patch)
tree22468665036b37922664529df976c890c5bed2d0 /ipc/sem.c
parent7d6feeb287c61aafa88f06345387b1188edf4b86 (diff)
downloadkernel-mfld-blackbay-bf17bb717759d50a2733a7a8157a7c4a25d93abc.tar.gz
kernel-mfld-blackbay-bf17bb717759d50a2733a7a8157a7c4a25d93abc.tar.bz2
kernel-mfld-blackbay-bf17bb717759d50a2733a7a8157a7c4a25d93abc.zip
ipc/sem.c: sem optimise undo list search
Around a month ago, there was some discussion about an improvement of the sysv sem algorithm: Most (at least: some important) users only use simple semaphore operations, therefore it's worthwhile to optimize this use case. This patch: Move last looked up sem_undo struct to the head of the task's undo list. Attempt to move common entries to the front of the list so search time is reduced. This reduces lookup_undo on oprofile of problematic SAP workload by 30% (see patch 4 for a description of SAP workload). Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Manfred Spraul <manfred@colorfullife.com> Cc: Pierre Peiffer <peifferp@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc/sem.c')
-rw-r--r--ipc/sem.c26
1 file changed, 20 insertions, 6 deletions
diff --git a/ipc/sem.c b/ipc/sem.c
index 2f2a4795957..cb0070ecf5b 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -962,17 +962,31 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
return 0;
}
-static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
- struct sem_undo *walk;
+ struct sem_undo *un;
- list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
- if (walk->semid == semid)
- return walk;
+ list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+ if (un->semid == semid)
+ return un;
}
return NULL;
}
+static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+{
+ struct sem_undo *un;
+
+ assert_spin_locked(&ulp->lock);
+
+ un = __lookup_undo(ulp, semid);
+ if (un) {
+ list_del_rcu(&un->list_proc);
+ list_add_rcu(&un->list_proc, &ulp->list_proc);
+ }
+ return un;
+}
+
/**
* find_alloc_undo - Lookup (and if not present create) undo array
* @ns: namespace
@@ -1308,7 +1322,7 @@ void exit_sem(struct task_struct *tsk)
if (IS_ERR(sma))
continue;
- un = lookup_undo(ulp, semid);
+ un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.