author    David S. Miller <davem@davemloft.net>  2010-10-04 11:56:38 -0700
committer David S. Miller <davem@davemloft.net>  2010-10-04 11:56:38 -0700
commit 21a180cda012e1f93e362dd4a9b0bfd3d8c92940 (patch)
tree   0e0d10baa3fdcd8ffbc6881076ff1695808dad9d /net/ipv4
parent c7d4426a98a5f6654cd0b4b33d9dab2e77192c18 (diff)
parent 51e97a12bef19b7e43199fc153cf9bd5f2140362 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/ipv4/Kconfig
	net/ipv4/tcp_timer.c
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig     |  4
-rw-r--r--  net/ipv4/igmp.c      | 14
-rw-r--r--  net/ipv4/route.c     |  2
-rw-r--r--  net/ipv4/tcp.c       |  2
-rw-r--r--  net/ipv4/tcp_input.c |  3
-rw-r--r--  net/ipv4/tcp_timer.c | 25
6 files changed, 33 insertions(+), 17 deletions(-)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 5462e2d147a..e848e6c062c 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -223,7 +223,7 @@ config NET_IPGRE_DEMUX
config NET_IPGRE
tristate "IP: GRE tunnels over IP"
- depends on NET_IPGRE_DEMUX
+ depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX
help
Tunneling means encapsulating data of one protocol type within
another protocol and sending it over a channel that understands the
@@ -419,7 +419,7 @@ config INET_XFRM_MODE_BEET
If unsure, say Y.
config INET_LRO
- bool "Large Receive Offload (ipv4/tcp)"
+ tristate "Large Receive Offload (ipv4/tcp)"
default y
---help---
Support for Large Receive Offload (ipv4/tcp).
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1fdcacd36ce..2a4bb76f213 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
int mark = 0;
- if (len == 8 || IGMP_V2_SEEN(in_dev)) {
+ if (len == 8) {
if (ih->code == 0) {
/* Alas, old v1 router presents here. */
@@ -856,6 +856,18 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
igmpv3_clear_delrec(in_dev);
} else if (len < 12) {
return; /* ignore bogus packet; freed by caller */
+ } else if (IGMP_V1_SEEN(in_dev)) {
+ /* This is a v3 query with v1 queriers present */
+ max_delay = IGMP_Query_Response_Interval;
+ group = 0;
+ } else if (IGMP_V2_SEEN(in_dev)) {
+ /* this is a v3 query with v2 queriers present;
+ * Interpretation of the max_delay code is problematic here.
+ * A real v2 host would use ih_code directly, while v3 has a
+ * different encoding. We use the v3 encoding as more likely
+ * to be intended in a v3 query.
+ */
+ max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
} else { /* v3 */
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
return;
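
For reference, the max_delay computed in the v2-queriers branch above comes from the IGMPv3 Max Resp Code field, which RFC 3376 encodes literally below 128 and as a 4-bit mantissa / 3-bit exponent float above that. The standalone sketch below is not kernel code; igmpv3_max_resp_time() is a hypothetical helper mirroring what IGMPV3_MRC() evaluates to, decoding the code byte into tenths of a second (the kernel then scales that into jiffies with HZ/IGMP_TIMER_SCALE).

#include <stdio.h>

/* Hypothetical decoder for the IGMPv3 Max Resp Code (RFC 3376 4.1.1):
 * values below 128 are taken literally, larger values are a
 * 3-bit-exponent / 4-bit-mantissa floating-point form.
 * Result is in units of 1/10 second. */
static unsigned int igmpv3_max_resp_time(unsigned char code)
{
	if (code < 128)
		return code;
	return ((code & 0x0f) | 0x10) << (((code >> 4) & 0x07) + 3);
}

int main(void)
{
	/* e.g. code 0x8f -> (0x1f << 3) = 248 tenths of a second */
	unsigned char samples[] = { 100, 0x8f, 0xff };
	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("code %3u -> %u tenths of a second\n",
		       samples[i], igmpv3_max_resp_time(samples[i]));
	return 0;
}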
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c3cb8bd2363..04e0df82b88 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1232,7 +1232,7 @@ restart:
}
if (net_ratelimit())
- printk(KERN_WARNING "Neighbour table overflow.\n");
+ printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
rt_drop(rt);
return -ENOBUFS;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19192c5fe67..1664a0590bb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -943,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sg = sk->sk_route_caps & NETIF_F_SG;
while (--iovlen >= 0) {
- int seglen = iov->iov_len;
+ size_t seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
iov++;
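
The int-to-size_t change above matters because iov_len is a size_t: on 64-bit, a segment longer than INT_MAX wraps negative when stored in an int, and a "while (seglen > 0)" style loop would never run for it. A minimal userspace sketch with hypothetical values, not kernel code:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical iovec length larger than INT_MAX, as a 64-bit
	 * process could legitimately pass to sendmsg(). */
	size_t iov_len = (size_t)INT_MAX + 2;

	int    as_int  = (int)iov_len;  /* truncates; typically negative */
	size_t as_size = iov_len;       /* preserved */

	printf("iov_len          = %zu\n", iov_len);
	printf("stored in int    = %d  (segment loop never runs)\n", as_int);
	printf("stored in size_t = %zu (segment copied in full)\n", as_size);
	return 0;
}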
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eaf20e7e61d..f6fdd727a23 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2532,7 +2532,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
cnt += tcp_skb_pcount(skb);
if (cnt > packets) {
- if (tcp_is_sack(tp) || (oldcnt >= packets))
+ if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+ (oldcnt >= packets))
break;
mss = skb_shinfo(skb)->gso_size;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index baea4a12902..f3c8c6c019a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
/* This function calculates a "timeout" which is equivalent to the timeout of a
* TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
*/
static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
- unsigned int timeout)
+ unsigned int timeout,
+ bool syn_set)
{
unsigned int linear_backoff_thresh, start_ts;
+ unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
if (!inet_csk(sk)->icsk_retransmits)
return false;
@@ -152,12 +155,12 @@ static bool retransmits_timed_out(struct sock *sk,
start_ts = tcp_sk(sk)->retrans_stamp;
if (likely(timeout == 0)) {
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+ timeout = ((2 << boundary) - 1) * rto_base;
else
- timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+ timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
}
return (tcp_time_stamp - start_ts) >= timeout;
@@ -168,14 +171,15 @@ static int tcp_write_timeout(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int retry_until;
- bool do_reset;
+ bool do_reset, syn_set = 0;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits)
dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+ syn_set = 1;
} else {
- if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
@@ -188,7 +192,7 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
- !retransmits_timed_out(sk, retry_until, 0);
+ !retransmits_timed_out(sk, retry_until, 0, 0);
if (tcp_out_of_resources(sk, do_reset))
return 1;
@@ -196,8 +200,7 @@ static int tcp_write_timeout(struct sock *sk)
}
if (retransmits_timed_out(sk, retry_until,
- (1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV) ? 0 :
- icsk->icsk_user_timeout)) {
+ syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
@@ -439,7 +442,7 @@ out_reset_timer:
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
- if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
__sk_dst_reset(sk);
out:;
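
For a sense of what retransmits_timed_out() computes: with an initial RTO of rto_base, the first few retransmissions double each time, summing to (2^(boundary+1) - 1) * rto_base, and every retransmission past linear_backoff_thresh is capped at TCP_RTO_MAX. The sketch below mirrors that formula under assumed constants (HZ = 1000, TCP_RTO_MIN = HZ/5, TCP_TIMEOUT_INIT = 3*HZ, TCP_RTO_MAX = 120*HZ); backoff_timeout() and ilog2_u() are illustrative helpers, not kernel functions.

#include <stdbool.h>
#include <stdio.h>

/* Assumed values mirroring the kernel constants of this era. */
#define HZ               1000U
#define TCP_RTO_MIN      (HZ / 5)
#define TCP_TIMEOUT_INIT (3 * HZ)
#define TCP_RTO_MAX      (120 * HZ)

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

/* Sketch of the timeout formula: "boundary" exponentially backed-off
 * retransmissions starting at rto_base, with each retransmission past
 * linear_backoff_thresh capped at TCP_RTO_MAX. */
static unsigned int backoff_timeout(unsigned int boundary, bool syn_set)
{
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
	unsigned int thresh = ilog2_u(TCP_RTO_MAX / rto_base);

	if (boundary <= thresh)
		return ((2U << boundary) - 1) * rto_base;
	return ((2U << thresh) - 1) * rto_base +
	       (boundary - thresh) * TCP_RTO_MAX;
}

int main(void)
{
	/* With HZ = 1000, jiffies equal milliseconds here. */
	printf("data, boundary 15: %u ms\n", backoff_timeout(15, false)); /* ~924600 */
	printf("SYN,  boundary  5: %u ms\n", backoff_timeout(5, true));   /* 189000 */
	return 0;
}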