author    David S. Miller <davem@davemloft.net>  2010-08-25 02:27:49 -0700
committer David S. Miller <davem@davemloft.net>  2010-08-25 02:27:49 -0700
commit    ad1af0fedba14f82b240a03fe20eb9b2fdbd0357 (patch)
tree      4d53aa8bc2d9df782aa792e52670ab55c7a44d5b /include
parent    b2bc85631e72485b984bcd202a104591874babba (diff)
tcp: Combat per-cpu skew in orphan tests.
As reported by Anton Blanchard, when we use percpu_counter_read_positive() to
make our orphan socket limit checks, the check can be off by up to
num_cpus_online() * batch (which is 32 by default), which on a 128 cpu machine
can be as large as the default orphan limit itself.

Fix this by doing the full, expensive sum check if the optimized check triggers.

Reported-by: Anton Blanchard <anton@samba.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
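[Editor's illustration] The sketch below mimics the two-stage pattern of the patch in plain user-space C: a cheap but potentially skewed per-CPU estimate is consulted first, and the expensive exact sum is computed only when that estimate crosses the limit. The names here (fake_percpu_counter, approx_count(), exact_count(), over_limit(), NR_CPUS, COUNTER_BATCH) are hypothetical stand-ins for percpu_counter_read_positive()/percpu_counter_sum_positive(), not kernel code.

#include <stdbool.h>

#define NR_CPUS        128
#define COUNTER_BATCH   32   /* per-cpu slack kept locally before folding in */

struct fake_percpu_counter {
	long count;              /* folded-in global total */
	long percpu[NR_CPUS];    /* unfolded per-cpu deltas, |delta| < COUNTER_BATCH */
};

/* Cheap read: ignores the per-cpu deltas, so it can be off by up to
 * NR_CPUS * COUNTER_BATCH (128 * 32 here); negative results clamp to 0. */
static long approx_count(const struct fake_percpu_counter *c)
{
	return c->count > 0 ? c->count : 0;
}

/* Expensive read: folds every per-cpu delta into the total. */
static long exact_count(const struct fake_percpu_counter *c)
{
	long sum = c->count;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += c->percpu[cpu];
	return sum > 0 ? sum : 0;
}

/* Two-stage limit check in the spirit of the patch: only pay for the
 * exact sum when the skewed fast path says we might be over the limit. */
static bool over_limit(const struct fake_percpu_counter *c, long limit, int shift)
{
	if ((approx_count(c) << shift) > limit)
		return (exact_count(c) << shift) > limit;
	return false;
}

Because the fast path is only wrong by a bounded amount, the slow sum runs solely in the rare case where the counter is already near the limit, which keeps the common case cheap.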
Diffstat (limited to 'include')
-rw-r--r--  include/net/tcp.h | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index df6a2eb2019..eaa9582779d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -268,11 +268,21 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
 	return seq3 - seq2 >= seq1 - seq2;
 }
 
-static inline int tcp_too_many_orphans(struct sock *sk, int num)
+static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 {
-	return (num > sysctl_tcp_max_orphans) ||
-		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
+	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
+	int orphans = percpu_counter_read_positive(ocp);
+
+	if (orphans << shift > sysctl_tcp_max_orphans) {
+		orphans = percpu_counter_sum_positive(ocp);
+		if (orphans << shift > sysctl_tcp_max_orphans)
+			return true;
+	}
+
+	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+	    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+		return true;
+	return false;
 }
 
 /* syncookies: remember time of last synqueue overflow */
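[Editor's illustration] The diffstat above is limited to include/, so the updated call sites are not part of this hunk. As a purely hypothetical sketch (orphan_check_example() is not one of the real net/ipv4 callers), the new shift argument lets a caller weight a socket's contribution before the comparison against sysctl_tcp_max_orphans:

/* Hypothetical caller sketch: shift = 1 makes the orphan count weigh
 * double for this socket, shift = 2 quadruple, and so on. */
static bool orphan_check_example(struct sock *sk, bool suspicious)
{
	int shift = 0;

	if (suspicious)		/* e.g. a peer that stopped responding */
		shift++;

	return tcp_too_many_orphans(sk, shift);
}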