author	Simon Derr <simon.derr@bull.net>	2012-09-17 15:16:31 +0200
committer	Eric Van Hensbergen <ericvh@gmail.com>	2012-10-11 12:03:31 -0500
commit	759f42987f98915764bad922ee123acb0eadbe33 (patch)
tree	57968c56307e7ea636a7558b15edb669dfb14399 /net/9p
parent	584a8c13d58423462680907d4cc40d9929c9030a (diff)
9P: Fix race between p9_write_work() and p9_fd_request()
Race scenario:

   thread A                            thread B
   p9_write_work()                     p9_fd_request()

   if (list_empty(&m->unsent_req_list))
     ...
                                       spin_lock(&client->lock);
                                       req->status = REQ_STATUS_UNSENT;
                                       list_add_tail(..., &m->unsent_req_list);
                                       spin_unlock(&client->lock);
                                       ...
                                       if (n & POLLOUT &&
                                          !test_and_set_bit(Wworksched, &m->wsched)
                                            schedule_work(&m->wq);
                                       --> not done because Wworksched is set

   clear_bit(Wworksched, &m->wsched);
   return;

--> nobody will take care of sending the new request.

This is not very likely to happen though, because p9_write_work() being called
with an empty unsent_req_list is not frequent. But this also means that taking
the lock earlier will not be costly.

Signed-off-by: Simon Derr <simon.derr@bull.net>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
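As an illustration only, here is a minimal user-space sketch of the pattern this
patch establishes: the emptiness check and the clearing of the "work scheduled"
flag must happen under the same lock the producer holds while enqueueing. The
names conn, writer_work(), queue_request() and worksched are hypothetical
stand-ins for m, p9_write_work(), p9_fd_request() and the Wworksched bit; a
pthread mutex and a C11 atomic replace client->lock and test_and_set_bit().
This is not the kernel code, just the locking idea.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	struct request *next;
	int id;
};

struct conn {
	pthread_mutex_t lock;      /* stands in for client->lock           */
	struct request *unsent;    /* stands in for m->unsent_req_list     */
	atomic_bool worksched;     /* stands in for the Wworksched bit     */
};

/* Writer side, mirroring the fixed p9_write_work(): the emptiness check and
 * the clearing of worksched are both performed under the lock. */
static void writer_work(struct conn *c)
{
	struct request *req;

	for (;;) {
		pthread_mutex_lock(&c->lock);
		if (c->unsent == NULL) {
			/* A concurrent queue_request() either inserted its
			 * request before we took the lock (then this branch
			 * is not reached), or will run its test-and-set after
			 * worksched is cleared here and call the writer
			 * again itself. */
			atomic_store(&c->worksched, false);
			pthread_mutex_unlock(&c->lock);
			return;
		}
		req = c->unsent;                   /* take the next request */
		c->unsent = req->next;
		pthread_mutex_unlock(&c->lock);

		printf("sending request %d\n", req->id); /* ... write it out ... */
	}
}

/* Producer side, mirroring p9_fd_request(): enqueue under the lock, then
 * "schedule" the writer only if it was not already scheduled. */
static void queue_request(struct conn *c, struct request *req)
{
	pthread_mutex_lock(&c->lock);
	req->next = c->unsent;
	c->unsent = req;
	pthread_mutex_unlock(&c->lock);

	if (!atomic_exchange(&c->worksched, true)) /* test_and_set_bit(...) */
		writer_work(c);                    /* schedule_work(&m->wq) */
}

A conn would be initialized with pthread_mutex_init(), unsent = NULL and
worksched = false before use. With this ordering, either writer_work() observes
the freshly queued request, or queue_request() observes worksched already
cleared and reschedules the writer; the lost-wakeup interleaving described
above can no longer occur.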
Diffstat (limited to 'net/9p')
-rw-r--r--	net/9p/trans_fd.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index b2c308fffb8a..0031a8cf145d 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -453,12 +453,13 @@ static void p9_write_work(struct work_struct *work)
 	}
 
 	if (!m->wsize) {
+		spin_lock(&m->client->lock);
 		if (list_empty(&m->unsent_req_list)) {
 			clear_bit(Wworksched, &m->wsched);
+			spin_unlock(&m->client->lock);
 			return;
 		}
 
-		spin_lock(&m->client->lock);
 		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
 				 req_list);
 		req->status = REQ_STATUS_SENT;