author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2008-09-20 21:23:49 -0700
committer	David S. Miller <davem@davemloft.net>	2008-09-20 21:23:49 -0700
commit	08ebd1721ab8fd362e90ae17b461c07b23fa2824 (patch)
tree	e49b750b77817431b640184958325225dc5ab872
parent	61eb55f4db7eaf5fb2d5ec12981a8cda755bb0e1 (diff)
tcp: remove tp->lost_out guard to make joining diff nicer
The validity of tp->retransmit_high must then be ensured even when no L'ed (lost-marked) skb exists.

This makes a minor change to behavior: we now have to iterate over the head skb to find out that the loop terminates.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/ipv4/tcp_output.c	75
1 file changed, 38 insertions(+), 37 deletions(-)
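Illustration (not part of the commit): a minimal, self-contained C sketch of the invariant that the new pre-loop assignment provides. The local variables, their example values, and the simplified before() helper below are assumptions made for this sketch, not the kernel's definitions. The point it demonstrates: with retransmit_high pinned to snd_una when lost_out is zero, the loop's existing "!before(seq, retransmit_high)" check fires already on the head skb, so the now-unguarded loop terminates after examining only the head.

/* Illustration only: simplified model of the patch's invariant.
 * When nothing is marked lost, retransmit_high is pinned to snd_una,
 * so the termination test succeeds on the very first (head) skb and
 * the loop breaks without retransmitting anything.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's before() sequence comparison. */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t snd_una = 1000;	/* first unacknowledged sequence number (example value) */
	uint32_t lost_out = 0;		/* nothing is marked lost, i.e. no L'ed skb */
	uint32_t retransmit_high = 0;	/* potentially stale without the old lost_out guard */
	uint32_t head_seq = snd_una;	/* the head skb starts at snd_una */

	/* The fix-up the patch adds in front of the loop. */
	if (!lost_out)
		retransmit_high = snd_una;

	/* The loop's existing termination test: seq >= retransmit_high => break. */
	if (!before(head_seq, retransmit_high))
		printf("head skb is not below retransmit_high: loop breaks immediately\n");

	return 0;
}

Reading of the trade-off, under the same assumptions: without the pre-loop assignment, a stale retransmit_high could let the unguarded loop walk further down the write queue even though nothing is marked lost; the TCPCB_LOST check would still skip every skb, but the iteration itself is the extra work the commit message refers to.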
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2f24ecc3706..9f44be633ef 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2034,53 +2034,54 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	struct sk_buff *skb;
 	int mib_idx;
 
+	if (!tp->lost_out)
+		tp->retransmit_high = tp->snd_una;
+
 	if (tp->retransmit_skb_hint)
 		skb = tp->retransmit_skb_hint;
 	else
 		skb = tcp_write_queue_head(sk);
 
 	/* First pass: retransmit lost packets. */
-	if (tp->lost_out) {
-		tcp_for_write_queue_from(skb, sk) {
-			__u8 sacked = TCP_SKB_CB(skb)->sacked;
+	tcp_for_write_queue_from(skb, sk) {
+		__u8 sacked = TCP_SKB_CB(skb)->sacked;
 
-			if (skb == tcp_send_head(sk))
-				break;
-			/* we could do better than to assign each time */
-			tp->retransmit_skb_hint = skb;
-
-			/* Assume this retransmit will generate
-			 * only one packet for congestion window
-			 * calculation purposes. This works because
-			 * tcp_retransmit_skb() will chop up the
-			 * packet to be MSS sized and all the
-			 * packet counting works out.
-			 */
-			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
-				return;
-			if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high))
-				break;
-			if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
-				continue;
+		if (skb == tcp_send_head(sk))
+			break;
+		/* we could do better than to assign each time */
+		tp->retransmit_skb_hint = skb;
+
+		/* Assume this retransmit will generate
+		 * only one packet for congestion window
+		 * calculation purposes. This works because
+		 * tcp_retransmit_skb() will chop up the
+		 * packet to be MSS sized and all the
+		 * packet counting works out.
+		 */
+		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
+			return;
+		if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high))
+			break;
+		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
+			continue;
 
-			if (!(sacked & TCPCB_LOST))
-				continue;
+		if (!(sacked & TCPCB_LOST))
+			continue;
 
-			if (tcp_retransmit_skb(sk, skb)) {
-				tp->retransmit_skb_hint = NULL;
-				return;
-			}
-			if (icsk->icsk_ca_state != TCP_CA_Loss)
-				mib_idx = LINUX_MIB_TCPFASTRETRANS;
-			else
-				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
-			NET_INC_STATS_BH(sock_net(sk), mib_idx);
-
-			if (skb == tcp_write_queue_head(sk))
-				inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-							  inet_csk(sk)->icsk_rto,
-							  TCP_RTO_MAX);
+		if (tcp_retransmit_skb(sk, skb)) {
+			tp->retransmit_skb_hint = NULL;
+			return;
 		}
+		if (icsk->icsk_ca_state != TCP_CA_Loss)
+			mib_idx = LINUX_MIB_TCPFASTRETRANS;
+		else
+			mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+
+		if (skb == tcp_write_queue_head(sk))
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+						  inet_csk(sk)->icsk_rto,
+						  TCP_RTO_MAX);
 	}
 
 	/* OK, demanded retransmission is finished. */