author    Chuck Lever <chuck.lever@oracle.com>    2007-03-29 16:47:53 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2007-04-30 22:17:10 -0700
commit    2bea90d43a050bbc4021d44e59beb34f384438db (patch)
tree      2dd3f15bd9df537166a82777b0c95243b90b17e1 /net/sunrpc
parent    511d2e8855a065c8251d0c140ebc353854f1929e (diff)
SUNRPC: RPC buffer size estimates are too large
The RPC buffer size estimation logic in net/sunrpc/clnt.c always
significantly overestimates the requirements for the buffer size.
A little instrumentation demonstrated that in fact rpc_malloc was never
allocating the buffer from the mempool, but almost always called
kmalloc.

To compute the size of the RPC buffer more precisely, split p_bufsiz
into two fields; one for the argument size, and one for the result
size.  Then, compute the sum of the exact call and reply header sizes,
and split the RPC buffer precisely between the two.  That should keep
almost all RPC buffers within the 2KiB buffer mempool limit.

And, we can finally be rid of RPC_SLACK_SPACE!

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
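The size arithmetic the patch introduces is small enough to show as a
standalone sketch.  The header sizes and slack value below are
illustrative assumptions (the kernel defines RPC_CALLHDRSIZE and
RPC_REPHDRSIZE in its own headers and takes the slack from the auth
flavour's au_cslack); only the shape of the calculation mirrors the
patch:

/*
 * Standalone userspace sketch, not kernel code.  All numeric values
 * are assumptions chosen for illustration.
 */
#include <stdio.h>

#define RPC_CALLHDRSIZE 6	/* assumed call header size, in quads (32-bit words) */
#define RPC_REPHDRSIZE  4	/* assumed reply header size, in quads */

int main(void)
{
	unsigned int slack  = 7;	/* hypothetical au_cslack, in quads */
	unsigned int arglen = 4;	/* p_arglen of a small procedure, in quads */
	unsigned int replen = 1;	/* p_replen of that procedure, in quads */

	/* Sizes are computed in quads, then shifted left by 2 to convert to bytes. */
	unsigned int callsize = (RPC_CALLHDRSIZE + (slack << 1) + arglen) << 2;
	unsigned int rcvsize  = (RPC_REPHDRSIZE + slack + replen) << 2;

	/* One allocation serves both directions: call area first, reply area after it. */
	printf("call %u bytes, reply %u bytes, total %u bytes\n",
	       callsize, rcvsize, callsize + rcvsize);
	return 0;
}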
Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/clnt.c        62
-rw-r--r--   net/sunrpc/pmap_clnt.c    9
-rw-r--r--   net/sunrpc/xprt.c         1
3 files changed, 44 insertions, 28 deletions
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 396cdbe249d..12487aafaab 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -36,8 +36,6 @@
#include <linux/sunrpc/metrics.h>
-#define RPC_SLACK_SPACE (1024) /* total overkill */
-
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
@@ -747,21 +745,37 @@ call_reserveresult(struct rpc_task *task)
static void
call_allocate(struct rpc_task *task)
{
+ unsigned int slack = task->tk_auth->au_cslack;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = task->tk_xprt;
- unsigned int bufsiz;
+ struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
dprint_status(task);
+ task->tk_status = 0;
task->tk_action = call_bind;
+
if (req->rq_buffer)
return;
- /* FIXME: compute buffer requirements more exactly using
- * auth->au_wslack */
- bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;
+ if (proc->p_proc != 0) {
+ BUG_ON(proc->p_arglen == 0);
+ if (proc->p_decode != NULL)
+ BUG_ON(proc->p_replen == 0);
+ }
- if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
+ /*
+ * Calculate the size (in quads) of the RPC call
+ * and reply headers, and convert both values
+ * to byte sizes.
+ */
+ req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
+ req->rq_callsize <<= 2;
+ req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
+ req->rq_rcvsize <<= 2;
+
+ xprt->ops->buf_alloc(task, req->rq_callsize + req->rq_rcvsize);
+ if (req->rq_buffer != NULL)
return;
dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
@@ -788,6 +802,17 @@ rpc_task_force_reencode(struct rpc_task *task)
task->tk_rqstp->rq_snd_buf.len = 0;
}
+static inline void
+rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
+{
+ buf->head[0].iov_base = start;
+ buf->head[0].iov_len = len;
+ buf->tail[0].iov_len = 0;
+ buf->page_len = 0;
+ buf->len = 0;
+ buf->buflen = len;
+}
+
/*
* 3. Encode arguments of an RPC call
*/
@@ -795,28 +820,17 @@ static void
call_encode(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- struct xdr_buf *sndbuf = &req->rq_snd_buf;
- struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- unsigned int bufsiz;
kxdrproc_t encode;
__be32 *p;
dprint_status(task);
- /* Default buffer setup */
- bufsiz = req->rq_bufsize >> 1;
- sndbuf->head[0].iov_base = (void *)req->rq_buffer;
- sndbuf->head[0].iov_len = bufsiz;
- sndbuf->tail[0].iov_len = 0;
- sndbuf->page_len = 0;
- sndbuf->len = 0;
- sndbuf->buflen = bufsiz;
- rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
- rcvbuf->head[0].iov_len = bufsiz;
- rcvbuf->tail[0].iov_len = 0;
- rcvbuf->page_len = 0;
- rcvbuf->len = 0;
- rcvbuf->buflen = bufsiz;
+ rpc_xdr_buf_init(&req->rq_snd_buf,
+ req->rq_buffer,
+ req->rq_callsize);
+ rpc_xdr_buf_init(&req->rq_rcv_buf,
+ (char *)req->rq_buffer + req->rq_callsize,
+ req->rq_rcvsize);
/* Encode header and provided arguments */
encode = task->tk_msg.rpc_proc->p_encode;
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index d9f76534458..c45fc4c9951 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -335,7 +335,8 @@ static struct rpc_procinfo pmap_procedures[] = {
.p_proc = PMAP_SET,
.p_encode = (kxdrproc_t) xdr_encode_mapping,
.p_decode = (kxdrproc_t) xdr_decode_bool,
- .p_bufsiz = 4,
+ .p_arglen = 4,
+ .p_replen = 1,
.p_count = 1,
.p_statidx = PMAP_SET,
.p_name = "SET",
@@ -344,7 +345,8 @@ static struct rpc_procinfo pmap_procedures[] = {
.p_proc = PMAP_UNSET,
.p_encode = (kxdrproc_t) xdr_encode_mapping,
.p_decode = (kxdrproc_t) xdr_decode_bool,
- .p_bufsiz = 4,
+ .p_arglen = 4,
+ .p_replen = 1,
.p_count = 1,
.p_statidx = PMAP_UNSET,
.p_name = "UNSET",
@@ -353,7 +355,8 @@ static struct rpc_procinfo pmap_procedures[] = {
.p_proc = PMAP_GETPORT,
.p_encode = (kxdrproc_t) xdr_encode_mapping,
.p_decode = (kxdrproc_t) xdr_decode_port,
- .p_bufsiz = 4,
+ .p_arglen = 4,
+ .p_replen = 1,
.p_count = 1,
.p_statidx = PMAP_GETPORT,
.p_name = "GETPORT",
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 456a1451030..432ee92cf26 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -823,7 +823,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
req->rq_task = task;
req->rq_xprt = xprt;
req->rq_buffer = NULL;
- req->rq_bufsize = 0;
req->rq_xid = xprt_alloc_xid(xprt);
req->rq_release_snd_buf = NULL;
xprt_reset_majortimeo(req);
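For reference, a minimal userspace sketch of the buffer split that the
new rpc_xdr_buf_init() helper performs in call_encode().  The struct
layouts and sizes below are simplified stand-ins for the kernel's
struct kvec and struct xdr_buf, not the real definitions:

#include <stdlib.h>
#include <stddef.h>

struct kvec { void *iov_base; size_t iov_len; };

struct xdr_buf {
	struct kvec head[1];
	struct kvec tail[1];
	size_t page_len;
	size_t len;
	size_t buflen;
};

/* Mirrors the helper added by the patch: point head[0] at the region and reset the rest. */
static void rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
{
	buf->head[0].iov_base = start;
	buf->head[0].iov_len = len;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = len;
}

int main(void)
{
	size_t callsize = 128, rcvsize = 64;	/* hypothetical rq_callsize / rq_rcvsize */
	char *buffer = malloc(callsize + rcvsize);
	struct xdr_buf snd_buf, rcv_buf;

	if (buffer == NULL)
		return 1;

	/* Send buffer at the start of the allocation, receive buffer immediately after it. */
	rpc_xdr_buf_init(&snd_buf, buffer, callsize);
	rpc_xdr_buf_init(&rcv_buf, buffer + callsize, rcvsize);

	free(buffer);
	return 0;
}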