From e9f029855865e917821ef6034b31e340a4cfc815 Mon Sep 17 00:00:00 2001
From: Ricardo Labiaga
Date: Thu, 18 Jun 2009 22:01:24 -0400
Subject: nfs41: sunrpc: xprt_alloc_bc_request() should not use spin_lock_bh()

xprt_alloc_bc_request() is always called in soft interrupt context. Grab
the spin_lock instead of the bottom half spin_lock. Softirqs do not
preempt other softirqs running on the same processor, so there is no need
to disable bottom halves.

Signed-off-by: Ricardo Labiaga
Signed-off-by: Benny Halevy
Signed-off-by: Trond Myklebust
---
 net/sunrpc/backchannel_rqst.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'net/sunrpc/backchannel_rqst.c')

diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 5a7d342e3087..553621fb2c41 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -211,6 +211,9 @@ EXPORT_SYMBOL(xprt_destroy_backchannel);
  * has been preallocated as well. Use xprt_alloc_bc_request to allocate
  * to this request. Use xprt_free_bc_request to return it.
  *
+ * We know that we're called in soft interrupt context, grab the spin_lock
+ * since there is no need to grab the bottom half spin_lock.
+ *
  * Return an available rpc_rqst, otherwise NULL if non are available.
  */
 struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
@@ -218,7 +221,7 @@ struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
 	struct rpc_rqst *req;
 
 	dprintk("RPC:       allocate a backchannel request\n");
-	spin_lock_bh(&xprt->bc_pa_lock);
+	spin_lock(&xprt->bc_pa_lock);
 	if (!list_empty(&xprt->bc_pa_list)) {
 		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
 				rq_bc_pa_list);
@@ -226,7 +229,7 @@ struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
 	} else {
 		req = NULL;
 	}
-	spin_unlock_bh(&xprt->bc_pa_lock);
+	spin_unlock(&xprt->bc_pa_lock);
 
 	if (req != NULL) {
 		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
--
cgit v1.2.3
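
The locking rule behind this change can be sketched with a minimal, self-contained
example (the names demo_lock, demo_list, demo_take_item() and demo_add_item() are
hypothetical, not from the SUNRPC code): a spinlock shared between process context
and softirq context. The process-context side must take spin_lock_bh() so that a
softirq arriving on the same CPU cannot interrupt the critical section and deadlock
trying to take the same lock; code that already runs in softirq context, as
xprt_alloc_bc_request() does, can take the plain spin_lock(), because softirqs never
preempt other softirqs on the same CPU.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative sketch only; not part of the patched file. */
static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

struct demo_item {
	struct list_head node;
};

/*
 * Called in softirq context (for example from a network receive path).
 * Softirqs do not preempt other softirqs on the same CPU, so the plain
 * spin_lock() is sufficient here; disabling bottom halves again would
 * only add overhead.
 */
static struct demo_item *demo_take_item(void)
{
	struct demo_item *item = NULL;

	spin_lock(&demo_lock);
	if (!list_empty(&demo_list)) {
		item = list_first_entry(&demo_list, struct demo_item, node);
		list_del(&item->node);
	}
	spin_unlock(&demo_lock);

	return item;
}

/*
 * Called in process context. Here spin_lock_bh() is required: if a
 * softirq ran on this CPU while the lock was held and tried to take it
 * as well, it would spin forever on a lock its own CPU owns. Disabling
 * bottom halves for the critical section prevents that deadlock.
 */
static int demo_add_item(void)
{
	struct demo_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

	if (!item)
		return -ENOMEM;

	spin_lock_bh(&demo_lock);
	list_add_tail(&item->node, &demo_list);
	spin_unlock_bh(&demo_lock);

	return 0;
}

The patch applies the softirq-side half of this pattern to xprt->bc_pa_lock:
only the lock acquisition in the softirq path changes, while any process-context
users of the lock keep the _bh variant.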