From 2fefb8a09e7ed251ae8996e0c69066e74c5aa560 Mon Sep 17 00:00:00 2001
From: "J. Bruce Fields"
Date: Tue, 29 Nov 2011 11:35:35 -0500
Subject: svcrpc: destroy server sockets all at once

There's no reason I can see that we need to call sv_shutdown between
closing the two lists of sockets.

Cc: stable@kernel.org
Signed-off-by: J. Bruce Fields
---
 net/sunrpc/svc.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

(limited to 'net/sunrpc/svc.c')

diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6e038884ae0c..60babf0a9847 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -528,16 +528,11 @@ svc_destroy(struct svc_serv *serv)
 
 	del_timer_sync(&serv->sv_temptimer);
 
-	svc_close_all(&serv->sv_tempsocks);
+	svc_close_all(serv);
 
 	if (serv->sv_shutdown)
 		serv->sv_shutdown(serv);
 
-	svc_close_all(&serv->sv_permsocks);
-
-	BUG_ON(!list_empty(&serv->sv_permsocks));
-	BUG_ON(!list_empty(&serv->sv_tempsocks));
-
 	cache_clean_deferred(serv);
 
 	if (svc_serv_is_pooled(serv))
--
cgit v1.2.3

From b4f36f88b3ee7cf26bf0be84e6c7fc15f84dcb71 Mon Sep 17 00:00:00 2001
From: "J. Bruce Fields"
Date: Tue, 29 Nov 2011 17:00:26 -0500
Subject: svcrpc: avoid memory-corruption on pool shutdown

Socket callbacks use svc_xprt_enqueue() to add an xprt to a
pool->sp_sockets list.  In normal operation a server thread will later
come along and take the xprt off that list.  On shutdown, after all the
threads have exited, we instead manually walk the sv_tempsocks and
sv_permsocks lists to find all the xprt's and delete them.

So the sp_sockets lists don't really matter any more.  As a result,
we've mostly just ignored them and hoped they would go away.

Which has gotten us into trouble; witness for example ebc63e531cc6
"svcrpc: fix list-corrupting race on nfsd shutdown", the result of Ben
Greear noticing that a still-running svc_xprt_enqueue() could re-add an
xprt to an sp_sockets list just before it was deleted.  The fix was to
remove it from the list at the end of svc_delete_xprt().  But that only
made corruption less likely--I can see nothing that prevents a
svc_xprt_enqueue() from adding another xprt to the list at the same
moment that we're removing this xprt from the list.  In fact, despite
the earlier xpo_detach(), I don't even see what guarantees that
svc_xprt_enqueue() couldn't still be running on this xprt.

So, instead, note that svc_xprt_enqueue() essentially does:

	lock sp_lock
	if XPT_BUSY unset
		add to sp_sockets
	unlock sp_lock

So, if we do:

	set XPT_BUSY on every xprt.
	Empty every sp_sockets list, under the sp_socks locks.

Then we're left knowing that the sp_sockets lists are all empty and
will stay that way, since any svc_xprt_enqueue() will check XPT_BUSY
under the sp_lock and see it set.

And *then* we can continue deleting the xprt's.

(Thanks to Jeff Layton for being correctly suspicious of this code....)

Cc: Ben Greear
Cc: Jeff Layton
Cc: stable@kernel.org
Signed-off-by: J. Bruce Fields
---
 net/sunrpc/svc.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'net/sunrpc/svc.c')

diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 60babf0a9847..1a6c16ed7fa6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -527,7 +527,15 @@ svc_destroy(struct svc_serv *serv)
 		printk("svc_destroy: no threads for serv=%p!\n", serv);
 
 	del_timer_sync(&serv->sv_temptimer);
-
+	/*
+	 * The set of xprts (contained in the sv_tempsocks and
+	 * sv_permsocks lists) is now constant, since it is modified
+	 * only by accepting new sockets (done by service threads in
+	 * svc_recv) or aging old ones (done by sv_temptimer), or
+	 * configuration changes (excluded by whatever locking the
+	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
+	 * safe to traverse those lists and shut everything down:
+	 */
 	svc_close_all(serv);
 
 	if (serv->sv_shutdown)
--
cgit v1.2.3
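The ordering described in the commit message above can be sketched roughly
as follows.  This is an illustration only, not the patch itself: the helper
name example_close_all() is made up, and the real change lives mostly in
net/sunrpc/svc_xprt.c rather than in svc.c; only the structure and field
names (sv_tempsocks, sv_permsocks, sp_lock, sp_sockets, xpt_flags, XPT_BUSY)
follow the actual sunrpc definitions.

	static void example_close_all(struct svc_serv *serv)
	{
		struct svc_xprt *xprt;
		unsigned int i;

		/* 1: mark every xprt busy, so any svc_xprt_enqueue() that
		 * is still running will refuse to touch sp_sockets. */
		list_for_each_entry(xprt, &serv->sv_tempsocks, xpt_list)
			set_bit(XPT_BUSY, &xprt->xpt_flags);
		list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list)
			set_bit(XPT_BUSY, &xprt->xpt_flags);

		/* 2: empty each pool's sp_sockets list under sp_lock; a
		 * racing enqueue takes the same lock, sees XPT_BUSY set,
		 * and backs off. */
		for (i = 0; i < serv->sv_nrpools; i++) {
			struct svc_pool *pool = &serv->sv_pools[i];

			spin_lock_bh(&pool->sp_lock);
			while (!list_empty(&pool->sp_sockets)) {
				xprt = list_first_entry(&pool->sp_sockets,
							struct svc_xprt,
							xpt_ready);
				list_del_init(&xprt->xpt_ready);
			}
			spin_unlock_bh(&pool->sp_lock);
		}

		/* 3: only now walk sv_tempsocks and sv_permsocks and delete
		 * each xprt, knowing nothing can be re-queued behind us. */
	}

The point of the ordering is that step 2 takes the same sp_lock that
svc_xprt_enqueue() takes, so once step 2 completes no enqueue can have
observed XPT_BUSY clear, and the sp_sockets lists stay empty for good.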
From 94cf3179ccfc69d727dd884fd0831d82ada6bb06 Mon Sep 17 00:00:00 2001
From: "J. Bruce Fields"
Date: Thu, 1 Dec 2011 17:51:21 -0500
Subject: svcrpc: update outdated BKL comment

Signed-off-by: J. Bruce Fields
---
 net/sunrpc/svc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'net/sunrpc/svc.c')

diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 1a6c16ed7fa6..e9632bb66535 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -686,8 +686,8 @@ found_pool:
  * Create or destroy enough new threads to make the number
  * of threads the given number.  If `pool' is non-NULL, applies
  * only to threads in that pool, otherwise round-robins between
- * all pools.  Must be called with a svc_get() reference and
- * the BKL or another lock to protect access to svc_serv fields.
+ * all pools.  Caller must ensure that mutual exclusion between this and
+ * server startup or shutdown.
  *
  * Destroying threads relies on the service threads filling in
  * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
--
cgit v1.2.3

From 61c8504c428edcebf23b97775a129c5b393a302b Mon Sep 17 00:00:00 2001
From: "J. Bruce Fields"
Date: Thu, 22 Dec 2011 18:22:49 -0700
Subject: svcrpc: fix double-free on shutdown of nfsd after changing pool mode

The pool_to and to_pool fields of the global svc_pool_map are freed on
shutdown, but are initialized in nfsd startup only in the
SVC_POOL_PERCPU and SVC_POOL_PERNODE cases.

They *are* initialized to zero on kernel startup.  So as long as you
use only SVC_POOL_GLOBAL (the default), this will never be a problem.

You're also OK if you only ever use SVC_POOL_PERCPU or
SVC_POOL_PERNODE.

However, the following sequence of events leads to a double-free:

	1. set SVC_POOL_PERCPU or SVC_POOL_PERNODE
	2. start nfsd: both fields are initialized.
	3. shutdown nfsd: both fields are freed.
	4. set SVC_POOL_GLOBAL
	5. start nfsd: the fields are left untouched.
	6. shutdown nfsd: now we try to free them again.

Step 4 is actually unnecessary, since (for some bizarre reason) nfsd
automatically resets the pool mode to SVC_POOL_GLOBAL on shutdown.

Cc: stable@kernel.org
Signed-off-by: J. Bruce Fields
---
 net/sunrpc/svc.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'net/sunrpc/svc.c')

diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index e9632bb66535..1dd5fd014559 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
 
 fail_free:
 	kfree(m->to_pool);
+	m->to_pool = NULL;
 fail:
 	return -ENOMEM;
 }
@@ -287,7 +288,9 @@ svc_pool_map_put(void)
 	if (!--m->count) {
 		m->mode = SVC_POOL_DEFAULT;
 		kfree(m->to_pool);
+		m->to_pool = NULL;
 		kfree(m->pool_to);
+		m->pool_to = NULL;
 		m->npools = 0;
 	}
 
--
cgit v1.2.3
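A simplified model of the six-step sequence above shows why clearing the
pointers is enough: kfree(NULL) is a no-op, so once each kfree() is paired
with an assignment to NULL, the second shutdown of a global-mode server
frees nothing.  The struct and function below are illustrative only and do
not appear in the kernel; they just mirror the to_pool/pool_to fields.

	/* Simplified model (not kernel code) of the global map across
	 * two nfsd start/stop cycles. */
	static struct {
		unsigned int *to_pool;
		unsigned int *pool_to;
	} m;	/* zeroed at boot, like the real svc_pool_map */

	static void model_restart_double_free(void)
	{
		/* steps 1-3: percpu/pernode mode allocates the arrays on
		 * startup and frees them on shutdown */
		m.to_pool = kcalloc(4, sizeof(unsigned int), GFP_KERNEL);
		m.pool_to = kcalloc(4, sizeof(unsigned int), GFP_KERNEL);
		kfree(m.to_pool);	/* without the patch, m.to_pool   */
		kfree(m.pool_to);	/* and m.pool_to now dangle       */

		/* steps 4-5: global mode: startup leaves both fields alone */

		/* step 6: the second shutdown frees the same pointers again,
		 * corrupting the allocator.  With the patch each kfree()
		 * above is followed by "= NULL", and since kfree(NULL) does
		 * nothing, this second pass is harmless. */
		kfree(m.to_pool);
		kfree(m.pool_to);
	}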
From 9689dcce0b456793c46bdeea7a79adfab1bc9c5d Mon Sep 17 00:00:00 2001
From: "J. Bruce Fields"
Date: Fri, 23 Dec 2011 13:52:19 -0700
Subject: svcrpc: don't revert to SVC_POOL_DEFAULT on nfsd shutdown

This was unexpected behavior (at least for me)--why would you want
configuration settings automatically lost on nfsd restart?

In practice this won't affect distributions, which likely set everything
on every startup.  But I'd expect the behavior to be less confusing to
someone manually restarting nfsd for testing.

Signed-off-by: J. Bruce Fields
---
 net/sunrpc/svc.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'net/sunrpc/svc.c')

diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 1dd5fd014559..97017989fa1d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -286,7 +286,6 @@ svc_pool_map_put(void)
 	mutex_lock(&svc_pool_map_mutex);
 
 	if (!--m->count) {
-		m->mode = SVC_POOL_DEFAULT;
 		kfree(m->to_pool);
 		m->to_pool = NULL;
 		kfree(m->pool_to);
--
cgit v1.2.3
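Putting the last two patches together, svc_pool_map_put() ends up looking
roughly like this.  The body is reconstructed from the hunks above (the
trailing mutex_unlock() is assumed from context rather than shown in any
hunk), so treat it as a sketch of the result, not a verbatim copy:

	static void
	svc_pool_map_put(void)
	{
		struct svc_pool_map *m = &svc_pool_map;

		mutex_lock(&svc_pool_map_mutex);

		if (!--m->count) {
			/* free and clear the arrays, but leave m->mode
			 * alone so the administrator's pool_mode setting
			 * survives an nfsd restart */
			kfree(m->to_pool);
			m->to_pool = NULL;
			kfree(m->pool_to);
			m->pool_to = NULL;
			m->npools = 0;
		}

		mutex_unlock(&svc_pool_map_mutex);
	}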