author      Neal Cardwell <ncardwell@google.com>       2012-06-28 12:34:21 +0000
committer   David S. Miller <davem@davemloft.net>      2012-06-28 17:54:03 -0700
commit      9f10d3f6f966ef6f6a8d025a4b1d341923d04607 (patch)
tree        1f47abac17996fdbe45114dd6bbf65f451f1075d /net/ipv6
parent      9494218fbae2f88bd3f9b887714734abfdf38bab (diff)
tcp: plug dst leak in tcp_v6_conn_request()
The code in tcp_v6_conn_request() was implicitly assuming that
tcp_v6_send_synack() would take care of dst_release(), much as
tcp_v4_send_synack() already does. This resulted in
tcp_v6_conn_request() leaking a dst if sysctl_tw_recycle is enabled.

This commit restructures tcp_v6_send_synack() so that it accepts a dst
pointer and takes care of releasing the dst that is passed in, to plug
the leak and avoid future surprises by bringing the IPv6 behavior in
line with the IPv4 side.
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
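For readers who do not have the kernel's dst refcounting rules in their head, the following is a minimal, self-contained sketch of the ownership convention this patch adopts. It is not kernel code: route_lookup(), send_synack() and conn_request() are hypothetical stand-ins for inet6_csk_route_req(), tcp_v6_send_synack() and tcp_v6_conn_request(), and the reference counting is reduced to a plain counter. The point it illustrates is the contract described above: a caller may hand over a dst it already holds, the helper looks one up only when none was passed in, and the helper releases the dst on every exit path.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct dst_entry {
        int refcnt;                     /* models the kernel's dst reference count */
};

static int live_dsts;                   /* outstanding references; our leak detector */

static struct dst_entry *route_lookup(void)
{
        /* stand-in for inet6_csk_route_req(): returns a referenced dst or NULL */
        struct dst_entry *dst = calloc(1, sizeof(*dst));

        if (dst) {
                dst->refcnt = 1;
                live_dsts++;
        }
        return dst;
}

static void dst_release(struct dst_entry *dst)
{
        if (dst && --dst->refcnt == 0) {
                live_dsts--;
                free(dst);
        }
}

/* stand-in for tcp_v6_send_synack() after the patch: consumes dst on every path */
static int send_synack(struct dst_entry *dst)
{
        int err = -1;

        /* First, grab a route, but only if the caller did not pass one in. */
        if (!dst && (dst = route_lookup()) == NULL)
                goto done;

        /* ... build and transmit the SYNACK here ... */
        err = 0;
done:
        dst_release(dst);               /* single release point covers both cases */
        return err;
}

/* stand-in for tcp_v6_conn_request(): with tw_recycle it already holds a dst */
static int conn_request(int tw_recycle)
{
        struct dst_entry *dst = tw_recycle ? route_lookup() : NULL;

        /* before the patch, the early lookup above was never released: the leak */
        return send_synack(dst);
}

int main(void)
{
        conn_request(0);                /* plain path: helper routes by itself */
        conn_request(1);                /* tw_recycle path: caller's dst is consumed */
        assert(live_dsts == 0);         /* neither path leaks a route */
        printf("no dst leaked\n");
        return 0;
}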
Diffstat (limited to 'net/ipv6')
-rw-r--r--      net/ipv6/tcp_ipv6.c     19
1 file changed, 10 insertions, 9 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1db0caefdc..9c06eafaf69 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -477,7 +477,8 @@ out:
 }
 
-static int tcp_v6_send_synack(struct sock *sk,
+static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+                              struct flowi6 *fl6,
                               struct request_sock *req,
                               struct request_values *rvp,
                               u16 queue_mapping)
@@ -486,12 +487,10 @@ static int tcp_v6_send_synack(struct sock *sk,
         struct ipv6_pinfo *np = inet6_sk(sk);
         struct sk_buff * skb;
         struct ipv6_txoptions *opt = np->opt;
-        struct flowi6 fl6;
-        struct dst_entry *dst;
         int err = -ENOMEM;
 
-        dst = inet6_csk_route_req(sk, &fl6, req);
-        if (!dst)
+        /* First, grab a route. */
+        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
                 goto done;
 
         skb = tcp_make_synack(sk, dst, req, rvp);
@@ -499,9 +498,9 @@ static int tcp_v6_send_synack(struct sock *sk,
         if (skb) {
                 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
-                fl6.daddr = treq->rmt_addr;
+                fl6->daddr = treq->rmt_addr;
                 skb_set_queue_mapping(skb, queue_mapping);
-                err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+                err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
                 err = net_xmit_eval(err);
         }
 
@@ -514,8 +513,10 @@ done:
 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
 {
+        struct flowi6 fl6;
+
         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-        return tcp_v6_send_synack(sk, req, rvp, 0);
+        return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1201,7 +1202,7 @@ have_isn:
         if (security_inet_conn_request(sk, skb, req))
                 goto drop_and_release;
 
-        if (tcp_v6_send_synack(sk, req,
+        if (tcp_v6_send_synack(sk, dst, &fl6, req,
                                (struct request_values *)&tmp_ext,
                                skb_get_queue_mapping(skb)) ||
             want_cookie)
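Note how the two call sites in the hunks above follow the new contract: tcp_v6_rtx_synack() passes a NULL dst and lets tcp_v6_send_synack() perform its own route lookup, while tcp_v6_conn_request() hands over whatever dst (and flowi6) it already holds, e.g. from the tw_recycle path, and in both cases, per the commit message, the callee is responsible for releasing the dst. This is the same ownership convention the IPv4 side already uses in tcp_v4_send_synack().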