authorIngo Molnar <mingo@elte.hu>2012-03-12 20:44:07 +0100
committerIngo Molnar <mingo@elte.hu>2012-03-12 20:44:11 +0100
commit35239e23c66f1614c76739b62a299c3c92d6eb68 (patch)
tree7b1e068df888ec9a00b43c1dd7517a6490da6a94 /net
parent3f33ab1c0c741bfab2138c14ba1918a7905a1e8b (diff)
parent87e24f4b67e68d9fd8df16e0bf9c66d1ad2a2533 (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: We are going to queue up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_multicast.c              7
-rw-r--r--  net/bridge/br_netfilter.c             32
-rw-r--r--  net/bridge/br_stp.c                    8
-rw-r--r--  net/bridge/br_stp_if.c                 3
-rw-r--r--  net/bridge/netfilter/ebtables.c       26
-rw-r--r--  net/core/rtnetlink.c                  18
-rw-r--r--  net/ipv4/inetpeer.c                   81
-rw-r--r--  net/ipv4/route.c                      12
-rw-r--r--  net/ipv4/tcp_input.c                  23
-rw-r--r--  net/ipv6/addrconf.c                    4
-rw-r--r--  net/mac80211/iface.c                   3
-rw-r--r--  net/mac80211/rate.c                    2
-rw-r--r--  net/netfilter/nf_conntrack_core.c      8
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c   3
-rw-r--r--  net/openvswitch/actions.c             44
-rw-r--r--  net/openvswitch/datapath.c             3
16 files changed, 199 insertions, 78 deletions
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 568d5bf1753..702a1ae9220 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -446,8 +446,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
ip6h->nexthdr = IPPROTO_HOPOPTS;
ip6h->hop_limit = 1;
ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
- ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
- &ip6h->saddr);
+ if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+ &ip6h->saddr)) {
+ kfree_skb(skb);
+ return NULL;
+ }
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
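The hunk above makes br_ip6_multicast_alloc_query() bail out instead of sending a general query with an unset source address when ipv6_dev_get_saddr() fails, freeing the already-allocated skb on the error path. A minimal userspace sketch of that error-path discipline (undo the allocation, return NULL), using hypothetical packet/pick_source_addr() names:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the skb and the source-address lookup helper. */
struct packet { unsigned char saddr[16]; unsigned char payload[64]; };

static int pick_source_addr(unsigned char saddr[16])
{
	(void)saddr;
	return -1;              /* pretend no suitable address is configured */
}

static struct packet *alloc_query(void)
{
	struct packet *pkt = calloc(1, sizeof(*pkt));

	if (!pkt)
		return NULL;

	if (pick_source_addr(pkt->saddr)) {
		free(pkt);      /* undo the allocation before reporting failure */
		return NULL;
	}
	return pkt;
}

int main(void)
{
	struct packet *pkt = alloc_query();

	printf(pkt ? "query built\n" : "no source address, query skipped\n");
	free(pkt);
	return 0;
}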
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 84122472656..dec4f381713 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -62,6 +62,15 @@ static int brnf_filter_pppoe_tagged __read_mostly = 0;
#define brnf_filter_pppoe_tagged 0
#endif
+#define IS_IP(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+
+#define IS_IPV6(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+
+#define IS_ARP(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
if (vlan_tx_tag_present(skb))
@@ -639,8 +648,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
return NF_DROP;
br = p->br;
- if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb)) {
+ if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
return NF_ACCEPT;
@@ -651,8 +659,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
if (!brnf_call_iptables && !br->nf_call_iptables)
return NF_ACCEPT;
- if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
- !IS_PPPOE_IP(skb))
+ if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
@@ -701,7 +708,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct net_device *in;
- if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
+ if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
in = nf_bridge->physindev;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
@@ -718,6 +725,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
return 0;
}
+
/* This is the 'purely bridged' case. For IP, we pass the packet to
* netfilter with indev and outdev set to the bridge device,
* but we are still able to filter on the 'real' indev/outdev
@@ -744,11 +752,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
if (!parent)
return NF_DROP;
- if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
- IS_PPPOE_IP(skb))
+ if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
pf = PF_INET;
- else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb))
+ else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
@@ -795,7 +801,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
if (!brnf_call_arptables && !br->nf_call_arptables)
return NF_ACCEPT;
- if (skb->protocol != htons(ETH_P_ARP)) {
+ if (!IS_ARP(skb)) {
if (!IS_VLAN_ARP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
@@ -853,11 +859,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
if (!realoutdev)
return NF_DROP;
- if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
- IS_PPPOE_IP(skb))
+ if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
pf = PF_INET;
- else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb))
+ else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
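The new IS_IP/IS_IPV6/IS_ARP helpers treat a frame as plain IPv4, IPv6 or ARP only when no hardware-accelerated VLAN tag is present, so VLAN-offloaded traffic keeps taking the IS_VLAN_* paths instead of being misclassified. A rough userspace illustration of that classification order, with a made-up frame struct standing in for the sk_buff fields:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD
#define ETH_P_ARP  0x0806

/* Toy frame descriptor: protocol as seen by the stack plus an
 * "offloaded VLAN tag present" flag, mimicking vlan_tx_tag_present(). */
struct frame {
	uint16_t protocol;
	bool     vlan_tag_present;
};

#define IS_IP(f)   (!(f)->vlan_tag_present && (f)->protocol == ETH_P_IP)
#define IS_IPV6(f) (!(f)->vlan_tag_present && (f)->protocol == ETH_P_IPV6)
#define IS_ARP(f)  (!(f)->vlan_tag_present && (f)->protocol == ETH_P_ARP)

int main(void)
{
	struct frame plain_ip  = { ETH_P_IP, false };
	struct frame tagged_ip = { ETH_P_IP, true };   /* VLAN-offloaded IPv4 */

	printf("plain IPv4:  IS_IP=%d\n", IS_IP(&plain_ip));
	printf("tagged IPv4: IS_IP=%d (handled via the VLAN path)\n",
	       IS_IP(&tagged_ip));
	return 0;
}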
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index dd147d78a58..8c836d96ba7 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -17,9 +17,9 @@
#include "br_private_stp.h"
/* since time values in bpdu are in jiffies and then scaled (1/256)
- * before sending, make sure that is at least one.
+ * before sending, make sure that is at least one STP tick.
*/
-#define MESSAGE_AGE_INCR ((HZ < 256) ? 1 : (HZ/256))
+#define MESSAGE_AGE_INCR ((HZ / 256) + 1)
static const char *const br_port_state_names[] = {
[BR_STATE_DISABLED] = "disabled",
@@ -31,7 +31,7 @@ static const char *const br_port_state_names[] = {
void br_log_state(const struct net_bridge_port *p)
{
- br_info(p->br, "port %u(%s) entering %s state\n",
+ br_info(p->br, "port %u(%s) entered %s state\n",
(unsigned) p->port_no, p->dev->name,
br_port_state_names[p->state]);
}
@@ -186,7 +186,7 @@ static void br_record_config_information(struct net_bridge_port *p,
p->designated_cost = bpdu->root_path_cost;
p->designated_bridge = bpdu->bridge_id;
p->designated_port = bpdu->port_id;
- p->designated_age = jiffies + bpdu->message_age;
+ p->designated_age = jiffies - bpdu->message_age;
mod_timer(&p->message_age_timer, jiffies
+ (p->br->max_age - bpdu->message_age));
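The MESSAGE_AGE_INCR change replaces the truncating (HZ < 256) ? 1 : (HZ / 256) with (HZ / 256) + 1, which is always at least one tick and never rounds below the exact scaled value HZ/256. A quick comparison of the two formulas for common HZ values (compiles and runs as plain C):

#include <stdio.h>

int main(void)
{
	int hz_values[] = { 100, 250, 300, 1000 };

	for (unsigned i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		int hz  = hz_values[i];
		int old = (hz < 256) ? 1 : (hz / 256);  /* old MESSAGE_AGE_INCR */
		int new = (hz / 256) + 1;               /* new MESSAGE_AGE_INCR */

		printf("HZ=%4d  exact=%.2f  old=%d  new=%d\n",
		       hz, hz / 256.0, old, new);
	}
	return 0;
}

For HZ=1000 the old formula yields 3 while the exact value is about 3.9; the new formula yields 4, so the increment can only overestimate, never undershoot.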
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 19308e305d8..f494496373d 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -98,14 +98,13 @@ void br_stp_disable_port(struct net_bridge_port *p)
struct net_bridge *br = p->br;
int wasroot;
- br_log_state(p);
-
wasroot = br_is_root_bridge(br);
br_become_designated_port(p);
p->state = BR_STATE_DISABLED;
p->topology_change_ack = 0;
p->config_pending = 0;
+ br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
del_timer(&p->message_age_timer);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5864cc49136..5fe2ff3b01e 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1335,7 +1335,12 @@ static inline int ebt_make_matchname(const struct ebt_entry_match *m,
const char *base, char __user *ubase)
{
char __user *hlp = ubase + ((char *)m - base);
- if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+ /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
+ long. Copy 29 bytes and fill remaining bytes with zeroes. */
+ strncpy(name, m->u.match->name, sizeof(name));
+ if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
@@ -1344,7 +1349,10 @@ static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
const char *base, char __user *ubase)
{
char __user *hlp = ubase + ((char *)w - base);
- if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+ strncpy(name, w->u.watcher->name, sizeof(name));
+ if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
@@ -1355,6 +1363,7 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
int ret;
char __user *hlp;
const struct ebt_entry_target *t;
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
if (e->bitmask == 0)
return 0;
@@ -1368,7 +1377,8 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
if (ret != 0)
return ret;
- if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
+ strncpy(name, t->u.target->name, sizeof(name));
+ if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
@@ -1893,10 +1903,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
switch (compat_mwt) {
case EBT_COMPAT_MATCH:
- match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
- name, 0), "ebt_%s", name);
- if (match == NULL)
- return -ENOENT;
+ match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
if (IS_ERR(match))
return PTR_ERR(match);
@@ -1915,10 +1922,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
break;
case EBT_COMPAT_WATCHER: /* fallthrough */
case EBT_COMPAT_TARGET:
- wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
- name, 0), "ebt_%s", name);
- if (wt == NULL)
- return -ENOENT;
+ wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
if (IS_ERR(wt))
return PTR_ERR(wt);
off = xt_compat_target_offset(wt);
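The ebt_make_matchname/watchername/names changes copy each extension name into a zeroed, fixed-size stack buffer before copy_to_user(), so the bytes between the end of the shorter xt name (29 bytes, per the comment in the hunk) and EBT_FUNCTION_MAXNAMELEN are guaranteed zeroes rather than whatever happens to follow the name in memory. A small userspace sketch of the same zero-padding pattern, with copy_to_user() replaced by a plain memcpy into a caller buffer:

#include <stdio.h>
#include <string.h>

#define EBT_FUNCTION_MAXNAMELEN 32

/* Copy a possibly shorter name into a destination that the consumer
 * expects to be exactly EBT_FUNCTION_MAXNAMELEN bytes, padding the tail
 * with zeroes instead of exposing adjacent memory. */
static void export_name(char dst[EBT_FUNCTION_MAXNAMELEN], const char *src)
{
	char name[EBT_FUNCTION_MAXNAMELEN] = { 0 };  /* zero-initialised buffer */

	strncpy(name, src, sizeof(name) - 1);        /* at most 31 chars + NUL */
	memcpy(dst, name, EBT_FUNCTION_MAXNAMELEN);  /* full, fully defined block */
}

int main(void)
{
	char out[EBT_FUNCTION_MAXNAMELEN];

	export_name(out, "ip");
	printf("name=\"%s\", last byte=%d\n", out, out[EBT_FUNCTION_MAXNAMELEN - 1]);
	return 0;
}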
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 606a6e8f367..f965dce6f20 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1060,11 +1060,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
cb->seq = net->dev_base_seq;
- nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
- ifla_policy);
+ if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
- if (tb[IFLA_EXT_MASK])
- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ }
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
@@ -1900,10 +1901,11 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
u32 ext_filter_mask = 0;
u16 min_ifinfo_dump_size = 0;
- nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy);
-
- if (tb[IFLA_EXT_MASK])
- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ }
if (!ext_filter_mask)
return NLMSG_GOODSIZE;
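Both rtnl_dump_ifinfo() and rtnl_calcit() now consult the attribute table only when nlmsg_parse() reports success; on failure the on-stack tb[] may be left unset, so reading tb[IFLA_EXT_MASK] unconditionally could use garbage. A hedged sketch of the "trust the table only if parsing succeeded" pattern, with an invented parse_attrs() helper in place of the netlink API:

#include <stdio.h>
#include <string.h>

#define ATTR_MAX       4
#define ATTR_EXT_MASK  1

/* Hypothetical stand-in for nlmsg_parse(): fills tb[] and returns >= 0 on
 * success, < 0 when the message is malformed (tb[] is then left untouched). */
static int parse_attrs(const char *msg, unsigned int tb[ATTR_MAX + 1])
{
	if (!msg || strlen(msg) < 4)
		return -1;
	memset(tb, 0, sizeof(unsigned int) * (ATTR_MAX + 1));
	tb[ATTR_EXT_MASK] = 0xabcd;          /* pretend the attribute was present */
	return 0;
}

int main(void)
{
	unsigned int tb[ATTR_MAX + 1];
	unsigned int ext_filter_mask = 0;    /* default survives a parse error */

	if (parse_attrs("GETLINK", tb) >= 0) {   /* only read tb[] on success */
		if (tb[ATTR_EXT_MASK])
			ext_filter_mask = tb[ATTR_EXT_MASK];
	}
	printf("ext_filter_mask=0x%x\n", ext_filter_mask);
	return 0;
}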
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index bf4a9c4808e..d4d61b694fa 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
+#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>
@@ -66,6 +67,11 @@
static struct kmem_cache *peer_cachep __read_mostly;
+static LIST_HEAD(gc_list);
+static const int gc_delay = 60 * HZ;
+static struct delayed_work gc_work;
+static DEFINE_SPINLOCK(gc_lock);
+
#define node_height(x) x->avl_height
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
+static void inetpeer_gc_worker(struct work_struct *work)
+{
+ struct inet_peer *p, *n;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&gc_lock);
+ list_replace_init(&gc_list, &list);
+ spin_unlock_bh(&gc_lock);
+
+ if (list_empty(&list))
+ return;
+
+ list_for_each_entry_safe(p, n, &list, gc_list) {
+
+ if(need_resched())
+ cond_resched();
+
+ if (p->avl_left != peer_avl_empty) {
+ list_add_tail(&p->avl_left->gc_list, &list);
+ p->avl_left = peer_avl_empty;
+ }
+
+ if (p->avl_right != peer_avl_empty) {
+ list_add_tail(&p->avl_right->gc_list, &list);
+ p->avl_right = peer_avl_empty;
+ }
+
+ n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
+
+ if (!atomic_read(&p->refcnt)) {
+ list_del(&p->gc_list);
+ kmem_cache_free(peer_cachep, p);
+ }
+ }
+
+ if (list_empty(&list))
+ return;
+
+ spin_lock_bh(&gc_lock);
+ list_splice(&list, &gc_list);
+ spin_unlock_bh(&gc_lock);
+
+ schedule_delayed_work(&gc_work, gc_delay);
+}
/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
+ INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
}
static int addr_compare(const struct inetpeer_addr *a,
@@ -447,9 +498,8 @@ relookup:
p->rate_last = 0;
p->pmtu_expires = 0;
p->pmtu_orig = 0;
- p->redirect_genid = 0;
memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
-
+ INIT_LIST_HEAD(&p->gc_list);
/* Link the node. */
link_to_pool(p, base);
@@ -509,3 +559,30 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
+
+void inetpeer_invalidate_tree(int family)
+{
+ struct inet_peer *old, *new, *prev;
+ struct inet_peer_base *base = family_to_base(family);
+
+ write_seqlock_bh(&base->lock);
+
+ old = base->root;
+ if (old == peer_avl_empty_rcu)
+ goto out;
+
+ new = peer_avl_empty_rcu;
+
+ prev = cmpxchg(&base->root, old, new);
+ if (prev == old) {
+ base->total = 0;
+ spin_lock(&gc_lock);
+ list_add_tail(&prev->gc_list, &gc_list);
+ spin_unlock(&gc_lock);
+ schedule_delayed_work(&gc_work, gc_delay);
+ }
+
+out:
+ write_sequnlock_bh(&base->lock);
+}
+EXPORT_SYMBOL(inetpeer_invalidate_tree);
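inetpeer_invalidate_tree() above detaches the whole AVL tree by swapping the root pointer and hands the old root to a deferrable worker; the worker flattens the tree onto a list, frees only nodes whose refcount has dropped to zero, and reschedules itself while busy nodes remain. A simplified, single-threaded sketch of that "flatten and sweep" teardown for a plain binary tree (no refcounts, no locking), using an intrusive next pointer the way the worker uses gc_list:

#include <stdio.h>
#include <stdlib.h>

/* Toy tree node; "gc_next" plays the role of the intrusive gc_list
 * linkage walked by inetpeer_gc_worker(). */
struct node {
	int value;
	struct node *left, *right;
	struct node *gc_next;
};

static struct node *new_node(int v, struct node *l, struct node *r)
{
	struct node *n = calloc(1, sizeof(*n));

	n->value = v;
	n->left = l;
	n->right = r;
	return n;
}

/* Iteratively flatten the detached tree onto a work list and free it:
 * push the children of each node before releasing the node itself,
 * so no recursion depth is needed for an arbitrarily large tree. */
static void gc_tree(struct node *root)
{
	struct node *head = root;

	while (head) {
		struct node *p = head;

		head = p->gc_next;
		if (p->left)  { p->left->gc_next  = head; head = p->left; }
		if (p->right) { p->right->gc_next = head; head = p->right; }
		printf("freeing %d\n", p->value);
		free(p);
	}
}

int main(void)
{
	/* Detach the root (the kernel does this with cmpxchg on base->root),
	 * then garbage-collect everything that hung off it. */
	struct node *root = new_node(2, new_node(1, NULL, NULL),
					new_node(3, NULL, NULL));

	gc_tree(root);
	return 0;
}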
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bcacf54e541..01977479617 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -132,7 +132,6 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
-static int redirect_genid;
static struct delayed_work expires_work;
static unsigned long expires_ljiffies;
@@ -937,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
- redirect_genid++;
+ inetpeer_invalidate_tree(AF_INET);
}
/*
@@ -1485,10 +1484,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
peer = rt->peer;
if (peer) {
- if (peer->redirect_learned.a4 != new_gw ||
- peer->redirect_genid != redirect_genid) {
+ if (peer->redirect_learned.a4 != new_gw) {
peer->redirect_learned.a4 = new_gw;
- peer->redirect_genid = redirect_genid;
atomic_inc(&__rt_peer_genid);
}
check_peer_redir(&rt->dst, peer);
@@ -1793,8 +1790,6 @@ static void ipv4_validate_peer(struct rtable *rt)
if (peer) {
check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
- peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway)
check_peer_redir(&rt->dst, peer);
@@ -1958,8 +1953,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
dst_init_metrics(&rt->dst, peer->metrics, false);
check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
- peer->redirect_learned.a4 = 0;
+
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53c8ce4046b..b5e315f1364 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1403,8 +1403,16 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
BUG_ON(!pcount);
- /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
- if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
+ /* Adjust counters and hints for the newly sacked sequence
+ * range but discard the return value since prev is already
+ * marked. We must tag the range first because the seq
+ * advancement below implicitly advances
+ * tcp_highest_sack_seq() when skb is highest_sack.
+ */
+ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+ start_seq, end_seq, dup_sack, pcount);
+
+ if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1430,12 +1438,6 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = 0;
}
- /* Adjust counters and hints for the newly sacked sequence range but
- * discard the return value since prev is already marked.
- */
- tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
- start_seq, end_seq, dup_sack, pcount);
-
/* Difference in this won't matter, both ACKed by the same cumul. ACK */
TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1583,6 +1585,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
}
}
+ /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
+ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
+ goto fallback;
+
if (!skb_shift(prev, skb, len))
goto fallback;
if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
@@ -2567,6 +2573,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
if (cnt > packets) {
if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+ (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
(oldcnt >= packets))
break;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c02280a4d12..6b8ebc5da0e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -434,6 +434,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
/* Join all-node multicast group */
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
+ /* Join all-router multicast group if forwarding is set */
+ if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST))
+ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+
return ndev;
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 01a21c2f6ab..8e2137bd87e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1332,6 +1332,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
hw_roc = true;
list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ continue;
if (sdata->old_idle == sdata->vif.bss_conf.idle)
continue;
if (!ieee80211_sdata_running(sdata))
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index ad64f4d5271..f9b8e819ca6 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -344,7 +344,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
info->control.rates[i].idx = -1;
info->control.rates[i].flags = 0;
- info->control.rates[i].count = 1;
+ info->control.rates[i].count = 0;
}
if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ed86a3be678..fa4b82c8ae8 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -635,8 +635,12 @@ static noinline int early_drop(struct net *net, unsigned int hash)
if (del_timer(&ct->timeout)) {
death_by_timeout((unsigned long)ct);
- dropped = 1;
- NF_CT_STAT_INC_ATOMIC(net, early_drop);
+ /* Check if we indeed killed this entry. Reliable event
+ delivery may have inserted it into the dying list. */
+ if (test_bit(IPS_DYING_BIT, &ct->status)) {
+ dropped = 1;
+ NF_CT_STAT_INC_ATOMIC(net, early_drop);
+ }
}
nf_ct_put(ct);
return dropped;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 30c9d4ca021..10687692831 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1041,16 +1041,13 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
rcu_read_unlock();
- spin_unlock_bh(&nf_conntrack_lock);
nfnl_unlock();
if (request_module("nf-nat-ipv4") < 0) {
nfnl_lock();
- spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
return -EOPNOTSUPP;
}
nfnl_lock();
- spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
if (nfnetlink_parse_nat_setup_hook)
return -EAGAIN;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2725d1bdf29..48badffaafc 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -145,9 +145,16 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
*addr, new_addr, 1);
} else if (nh->protocol == IPPROTO_UDP) {
- if (likely(transport_len >= sizeof(struct udphdr)))
- inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
- *addr, new_addr, 1);
+ if (likely(transport_len >= sizeof(struct udphdr))) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace4(&uh->check, skb,
+ *addr, new_addr, 1);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
}
csum_replace4(&nh->check, *addr, new_addr);
@@ -197,8 +204,22 @@ static void set_tp_port(struct sk_buff *skb, __be16 *port,
skb->rxhash = 0;
}
-static int set_udp_port(struct sk_buff *skb,
- const struct ovs_key_udp *udp_port_key)
+static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
+{
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+ set_tp_port(skb, port, new_port, &uh->check);
+
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ *port = new_port;
+ skb->rxhash = 0;
+ }
+}
+
+static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
struct udphdr *uh;
int err;
@@ -210,16 +231,15 @@ static int set_udp_port(struct sk_buff *skb,
uh = udp_hdr(skb);
if (udp_port_key->udp_src != uh->source)
- set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+ set_udp_port(skb, &uh->source, udp_port_key->udp_src);
if (udp_port_key->udp_dst != uh->dest)
- set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+ set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
return 0;
}
-static int set_tcp_port(struct sk_buff *skb,
- const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
struct tcphdr *th;
int err;
@@ -328,11 +348,11 @@ static int execute_set_action(struct sk_buff *skb,
break;
case OVS_KEY_ATTR_TCP:
- err = set_tcp_port(skb, nla_data(nested_attr));
+ err = set_tcp(skb, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_UDP:
- err = set_udp_port(skb, nla_data(nested_attr));
+ err = set_udp(skb, nla_data(nested_attr));
break;
}
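Both set_ip_addr() and the new set_udp_port() respect the UDP-over-IPv4 rule that a transmitted checksum of zero means "no checksum": a datagram sent without a checksum is left alone, and when an updated checksum happens to compute to zero it is folded to 0xFFFF (CSUM_MANGLED_0) so it is not mistaken for "absent". A standalone sketch of that folding step, with a toy recompute_checksum() in place of the kernel's incremental inet_proto_csum_replace helpers:

#include <stdio.h>
#include <stdint.h>

#define CSUM_MANGLED_0 0xFFFFu   /* "all ones" form of a zero checksum */

/* RFC 1624-style incremental update: HC' = ~(~HC + ~m + m'). */
static uint16_t recompute_checksum(uint16_t old_csum, uint16_t old_field,
				   uint16_t new_field)
{
	uint32_t sum = (uint16_t)~old_csum;

	sum += (uint16_t)~old_field;
	sum += new_field;
	sum = (sum & 0xFFFF) + (sum >> 16);   /* fold carries back in */
	sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum;
}

static void update_udp_field(uint16_t *field, uint16_t new_value, uint16_t *check)
{
	if (*check) {                        /* only touch a checksum that exists */
		*check = recompute_checksum(*check, *field, new_value);
		if (!*check)                 /* 0 would mean "no checksum" ... */
			*check = CSUM_MANGLED_0; /* ... so send it as 0xFFFF */
	}
	*field = new_value;
}

int main(void)
{
	uint16_t port = 53, check = 0x1234;

	update_udp_field(&port, 5353, &check);
	printf("port=%u check=0x%04x\n", port, check);
	return 0;
}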
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ce64c18b8c7..2c030505b33 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1521,6 +1521,9 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
if (!vport)
return ERR_PTR(-ENODEV);
+ if (ovs_header->dp_ifindex &&
+ ovs_header->dp_ifindex != get_dpifindex(vport->dp))
+ return ERR_PTR(-ENODEV);
return vport;
} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);