author		Zach Brown <zach.brown@oracle.com>	2010-07-06 15:04:34 -0700
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 18:16:36 -0700
commit		d455ab64096b9a86849c7315c53e595330842db6 (patch)
tree		2e02538ae4cb9c708c5dd474d7fbd739a31c0ab1 /net
parent		80c51be56ffa257d3177f0d750d90be65d30c22f (diff)
download	linux-3.10-d455ab64096b9a86849c7315c53e595330842db6.tar.gz
		linux-3.10-d455ab64096b9a86849c7315c53e595330842db6.tar.bz2
		linux-3.10-d455ab64096b9a86849c7315c53e595330842db6.zip
RDS/IB: always process recv completions
The recv refill path was leaking fragments because the recv event handler had marked a ring element as free without freeing its frag. This was happening because it wasn't processing receives when the conn wasn't marked up or connecting, as can be the case if it races with rmmod.

Two observations support always processing receives in the callback.

First, buildup should only post receives, thus triggering recv event handler calls, once it has built up all the state to handle them. Teardown should destroy the CQ and drain the ring before tearing down the state needed to process recvs. Both appear to be true today.

Second, this test was fundamentally racy. There is nothing to stop rmmod and connection destruction from swooping in the moment after the conn state was sampled but before real receive processing starts.

Signed-off-by: Zach Brown <zach.brown@oracle.com>
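To make the leak concrete, here is a small stand-alone toy model of the two shapes the message contrasts. This is not the RDS code; every type and function name below is made up for illustration. The old shape samples the connection state, maybe consumes the frag, and always frees the ring slot; the fixed shape always consumes the frag before freeing the slot.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Toy model: each ring entry owns a "frag" buffer that must be consumed
 * before the entry is handed back to the refill path. */
struct recv_entry {
	void *frag;			/* posted receive buffer */
};

struct recv_ring {
	struct recv_entry entries[4];
	int used;			/* entries holding a posted frag */
};

static bool conn_up = true;		/* stands in for rds_conn_up()/_connecting() */

/* Old, racy shape: consume the frag only when the conn looks alive,
 * but unconditionally return the ring entry afterwards. */
static void handle_completion_old(struct recv_ring *ring, int i)
{
	if (conn_up) {				/* state sampled here ... */
		free(ring->entries[i].frag);	/* ... frag consumed     */
		ring->entries[i].frag = NULL;
	}
	ring->used--;	/* entry freed even if the frag was skipped -> leak */
}

/* Fixed shape: always consume the frag, success or not, so returning
 * the ring entry is always safe. */
static void handle_completion_new(struct recv_ring *ring, int i)
{
	free(ring->entries[i].frag);
	ring->entries[i].frag = NULL;
	ring->used--;
}

int main(void)
{
	struct recv_ring ring = { .used = 2 };

	ring.entries[0].frag = malloc(64);
	ring.entries[1].frag = malloc(64);

	conn_up = false;			/* rmmod / teardown races in here */

	handle_completion_old(&ring, 0);	/* skips the frag, still frees the entry */
	handle_completion_new(&ring, 1);	/* always consumes the frag */

	printf("old handler: frag %s\n", ring.entries[0].frag ? "leaked" : "freed");
	printf("new handler: frag %s\n", ring.entries[1].frag ? "leaked" : "freed");
	return 0;
}

Run after the conn_up flag flips, the old handler returns its ring entry with the frag still allocated, which mirrors the window the rmmod race opens against the real recv completion handler.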
Diffstat (limited to 'net')
-rw-r--r--	net/rds/ib_recv.c	|	20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c8c60981cf2..9c4208f6b45 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -979,18 +979,22 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
 		 * to get a recv completion _before_ the rdmacm ESTABLISHED
 		 * event is processed.
 		 */
-		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
+		if (wc.status == IB_WC_SUCCESS) {
+			rds_ib_process_recv(conn, recv, wc.byte_len, state);
+		} else {
 			/* We expect errors as the qp is drained during shutdown */
-			if (wc.status == IB_WC_SUCCESS) {
-				rds_ib_process_recv(conn, recv, wc.byte_len, state);
-			} else {
+			if (rds_conn_up(conn) || rds_conn_connecting(conn))
 				rds_ib_conn_error(conn, "recv completion on "
-					"%pI4 had status %u, disconnecting and "
-					"reconnecting\n", &conn->c_faddr,
-					wc.status);
-			}
+				                  "%pI4 had status %u, disconnecting and "
+				                  "reconnecting\n", &conn->c_faddr,
+				                  wc.status);
 		}
 
+		/*
+		 * It's very important that we only free this ring entry if we've truly
+		 * freed the resources allocated to the entry. The refilling path can
+		 * leak if we don't.
+		 */
 		rds_ib_ring_free(&ic->i_recv_ring, 1);
 	}
 }