author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-02-04 22:27:20 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 09:44:07 -0800
commit    0ccf831cbee94df9c5006dd46248c0f07847dd7c (patch)
tree      4de8d53c51dc4aff80f35a95cdd185229f0df79e
parent    96cf49a2c13e8dcf442abaadf6645f6a1fb3ae92 (diff)
lockdep: annotate epoll
On Sat, 2008-01-05 at 13:35 -0800, Davide Libenzi wrote:

> I remember I talked with Arjan about this some time ago. Basically, since
> 1) you can drop an epoll fd inside another epoll fd, and 2) callback-based
> wakeups are used, you can see a wake_up() from inside another wake_up(),
> but they will never refer to the same lock instance.
> Think about:
>
>     dfd = socket(...);
>     efd1 = epoll_create();
>     efd2 = epoll_create();
>     epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
>     epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
>
> When a packet arrives to the device underneath "dfd", the net code will
> issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
> callback wakeup entry on that queue, and the wake_up() performed by the
> "dfd" net code will end up in ep_poll_callback(). At this point epoll
> (efd1) notices that it may have some event ready, so it needs to wake up
> the waiters on its poll wait list (efd2). So it calls ep_poll_safewake(),
> which ends up in another wake_up(), after having checked the recursion
> constraints. Those are: no more than EP_MAX_POLLWAKE_NESTS levels, to
> avoid stack blasting, and never hit the same queue twice, to avoid loops
> like:
>
>     epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
>     epoll_ctl(efd3, EPOLL_CTL_ADD, efd2, ...);
>     epoll_ctl(efd4, EPOLL_CTL_ADD, efd3, ...);
>     epoll_ctl(efd1, EPOLL_CTL_ADD, efd4, ...);
>
> The code "if (tncur->wq == wq || ..." prevents re-entering the same
> queue/lock.

Since the epoll code is very careful never to nest same-instance locks,
allow the recursion.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
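The scenario Davide describes above can be reproduced from userspace. The
following is a minimal sketch, not part of the patch: a pipe stands in for
the socket "dfd", and all variable names are illustrative. Writing to the
pipe fires a wake_up() on the pipe's poll queue, which propagates through
efd1's callback up to a waiter on efd2.

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/epoll.h>

	int main(void)
	{
		int pfd[2];
		struct epoll_event ev;

		if (pipe(pfd) < 0) {
			perror("pipe");
			return 1;
		}

		int efd1 = epoll_create(1);	/* inner epoll instance */
		int efd2 = epoll_create(1);	/* outer epoll instance */
		if (efd1 < 0 || efd2 < 0) {
			perror("epoll_create");
			return 1;
		}

		/* efd1 watches the pipe's read end (the "dfd" role) */
		memset(&ev, 0, sizeof(ev));
		ev.events = EPOLLIN;
		ev.data.fd = pfd[0];
		if (epoll_ctl(efd1, EPOLL_CTL_ADD, pfd[0], &ev) < 0) {
			perror("epoll_ctl efd1");
			return 1;
		}

		/* efd2 watches efd1 itself: one epoll fd inside another */
		ev.data.fd = efd1;
		if (epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, &ev) < 0) {
			perror("epoll_ctl efd2");
			return 1;
		}

		/* wake_up() on the pipe's poll queue -> ep_poll_callback()
		 * for efd1 -> ep_poll_safewake() -> nested wake_up() on
		 * efd1's poll wait queue, seen by the waiter on efd2 */
		if (write(pfd[1], "x", 1) != 1) {
			perror("write");
			return 1;
		}

		int n = epoll_wait(efd2, &ev, 1, 1000);
		printf("epoll_wait(efd2) returned %d\n", n);
		return n == 1 ? 0 : 1;
	}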
 fs/eventpoll.c       |  2 +-
 include/linux/wait.h | 16 ++++++++++++++++
 2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 81c04abfb1a..a415f42d32c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -353,7 +353,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
 	spin_unlock_irqrestore(&psw->lock, flags);
 
 	/* Do really wake up now */
-	wake_up(wq);
+	wake_up_nested(wq, 1 + wake_nests);
 
 	/* Remove the current task from the list */
 	spin_lock_irqsave(&psw->lock, flags);
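Why 1 + wake_nests? All epoll poll wait queues are initialized at the same
call site, so lockdep puts their locks into a single class and would report
the nested acquisition above as a possible deadlock. wake_nests is the
recursion depth already tracked by ep_poll_safewake(), so passing it as the
lockdep subclass of the wake_up_nested() helper (added below in
include/linux/wait.h) marks each nesting level as distinct and bounded.
A rough kernel-style sketch of that subclass idiom follows; the names
ep_like and nested_wake are made up for illustration and are not from this
patch:

	/* Illustrative only: locks initialized at one call site share a
	 * lockdep class, so nesting two instances needs an explicit
	 * subclass annotation. */
	struct ep_like {
		spinlock_t lock;
	};

	static void ep_like_init(struct ep_like *e)
	{
		spin_lock_init(&e->lock);	/* one class for all instances */
	}

	static void nested_wake(struct ep_like *outer, struct ep_like *inner,
				int depth)
	{
		unsigned long f1, f2;

		spin_lock_irqsave(&outer->lock, f1);
		/* same class, one level deeper: annotate with the depth */
		spin_lock_irqsave_nested(&inner->lock, f2, depth);
		spin_unlock_irqrestore(&inner->lock, f2);
		spin_unlock_irqrestore(&outer->lock, f1);
	}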
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1f4fb0a81ec..33a2aa9e02f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -162,6 +162,22 @@ wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
 #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * macro to avoid include hell
+ */
+#define wake_up_nested(x, s)						\
+do {									\
+	unsigned long flags;						\
+									\
+	spin_lock_irqsave_nested(&(x)->lock, flags, (s));		\
+	wake_up_locked(x);						\
+	spin_unlock_irqrestore(&(x)->lock, flags);			\
+} while (0)
+#else
+#define wake_up_nested(x, s)	wake_up(x)
+#endif
+
 #define __wait_event(wq, condition)					\
 do {									\
 	DEFINE_WAIT(__wait);						\