author     Stephen Hemminger <shemminger@linux-foundation.org>  2007-03-06 20:21:20 -0800
committer  David S. Miller <davem@sunset.davemloft.net>  2007-04-25 22:23:48 -0700
commit     43e683926f808cec9802466c27cee7499eda3d11
tree       869b0138282a4f4391906f805fbf272e51f296f7  /net/ipv4/tcp_yeah.c
parent     c5f5877c043ca471c3a607fa2c864848b19bc49a
[TCP] TCP Yeah: cleanup
Eliminate the need for a full 64/64 divide to compute the queue estimate.
Variable maxqueue was really a constant.
Fix indentation.
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
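
The core of the cleanup: div64_64() performs a full 64-by-64 bit division, but here the divisor (rtt) always fits in 32 bits, so the cheaper do_div() (64-bit dividend, 32-bit divisor, divided in place) is sufficient. Below is a minimal userspace sketch of the resulting queue computation; the do_div_sketch() helper, the constant value, and the sample numbers are assumptions for illustration, not the kernel code itself:

#include <stdio.h>
#include <stdint.h>

#define TCP_YEAH_ALPHA 80	/* assumed value, for illustration only */

/* Stand-in for the kernel's do_div(): divide a 64-bit dividend by a
 * 32-bit divisor in place, returning the remainder.  (The real
 * do_div() is a macro operating directly on its first argument.) */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);
	*n /= base;
	return rem;
}

int main(void)
{
	uint32_t snd_cwnd = 100;	/* sample congestion window, packets */
	uint32_t rtt = 120000;		/* sample current RTT, usec */
	uint32_t base_rtt = 100000;	/* sample propagation RTT, usec */
	uint64_t bw;
	uint32_t queue;

	/* Vegas-style backlog estimate: queue = cwnd * (rtt - baseRTT) / rtt.
	 * The product of two 32-bit values needs 64 bits, but the divisor
	 * rtt fits in 32 bits, so a 64/32 divide is enough. */
	bw = snd_cwnd;
	bw *= rtt - base_rtt;
	do_div_sketch(&bw, rtt);
	queue = (uint32_t)bw;

	printf("estimated queue backlog: %u packets (threshold %d)\n",
	       queue, TCP_YEAH_ALPHA);
	return 0;
}

On 32-bit targets a 64/32 divide like do_div() compiles to a far cheaper sequence than a full 64/64 software division, which is what the commit message means by eliminating the full divide.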
Diffstat (limited to 'net/ipv4/tcp_yeah.c')
-rw-r--r--  net/ipv4/tcp_yeah.c  42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 18355a2608e1..46dd1bee583a 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -74,7 +74,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked)
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
-                               u32 seq_rtt, u32 in_flight, int flag)
+                                u32 seq_rtt, u32 in_flight, int flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct yeah *yeah = inet_csk_ca(sk);
@@ -142,8 +142,8 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
          */
 
         if (yeah->cntRTT > 2) {
-                u32 rtt;
-                u32 queue, maxqueue;
+                u32 rtt, queue;
+                u64 bw;
 
                 /* We have enough RTT samples, so, using the Vegas
                  * algorithm, we determine if we should increase or
@@ -158,32 +158,36 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
                  */
                 rtt = yeah->minRTT;
 
-                queue = (u32)div64_64((u64)tp->snd_cwnd * (rtt - yeah->baseRTT), rtt);
-
-                maxqueue = TCP_YEAH_ALPHA;
-
-                if (queue > maxqueue ||
-                    rtt - yeah->baseRTT > (yeah->baseRTT / TCP_YEAH_PHY)) {
-
-                        if (queue > maxqueue && tp->snd_cwnd > yeah->reno_count) {
-                                u32 reduction = min( queue / TCP_YEAH_GAMMA ,
-                                                     tp->snd_cwnd >> TCP_YEAH_EPSILON );
+                /* Compute excess number of packets above bandwidth
+                 * Avoid doing full 64 bit divide.
+                 */
+                bw = tp->snd_cwnd;
+                bw *= rtt - yeah->baseRTT;
+                do_div(bw, rtt);
+                queue = bw;
+
+                if (queue > TCP_YEAH_ALPHA ||
+                    rtt - yeah->baseRTT > (yeah->baseRTT / TCP_YEAH_PHY)) {
+                        if (queue > TCP_YEAH_ALPHA
+                            && tp->snd_cwnd > yeah->reno_count) {
+                                u32 reduction = min(queue / TCP_YEAH_GAMMA ,
+                                                    tp->snd_cwnd >> TCP_YEAH_EPSILON);
 
                                 tp->snd_cwnd -= reduction;
 
-                                tp->snd_cwnd = max( tp->snd_cwnd, yeah->reno_count);
+                                tp->snd_cwnd = max(tp->snd_cwnd,
+                                                   yeah->reno_count);
 
                                 tp->snd_ssthresh = tp->snd_cwnd;
-                        }
+                        }
 
                         if (yeah->reno_count <= 2)
-                                yeah->reno_count = max( tp->snd_cwnd>>1, 2U);
+                                yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
                         else
                                 yeah->reno_count++;
 
-                        yeah->doing_reno_now =
-                                min_t( u32, yeah->doing_reno_now + 1 , 0xffffff);
-
+                        yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
+                                                   0xffffffU);
                 } else {
                         yeah->fast_count++;
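
For readers following the last hunk: when the estimated backlog exceeds TCP_YEAH_ALPHA, the window is reduced by min(queue / TCP_YEAH_GAMMA, snd_cwnd >> TCP_YEAH_EPSILON), but never below the Reno-fair estimate reno_count. A standalone sketch of that clamping logic, with assumed constant values and sample inputs (not the kernel sources):

#include <stdio.h>
#include <stdint.h>

#define TCP_YEAH_GAMMA   1	/* assumed value, for illustration only */
#define TCP_YEAH_EPSILON 1	/* assumed value, for illustration only */

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }
static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
	uint32_t snd_cwnd = 100;	/* sample congestion window */
	uint32_t reno_count = 40;	/* sample Reno-fair window share */
	uint32_t queue = 90;		/* sample backlog estimate */

	/* Prune at most queue/GAMMA packets, capped at cwnd >> EPSILON... */
	uint32_t reduction = min_u32(queue / TCP_YEAH_GAMMA,
				     snd_cwnd >> TCP_YEAH_EPSILON);

	snd_cwnd -= reduction;

	/* ...and never let the window fall below the Reno estimate. */
	snd_cwnd = max_u32(snd_cwnd, reno_count);

	printf("cwnd after reduction: %u\n", snd_cwnd);
	return 0;
}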