Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlanproc.c               2
-rw-r--r--  net/core/skbuff.c                  2
-rw-r--r--  net/dsa/slave.c                   72
-rw-r--r--  net/dsa/tag_dsa.c                  1
-rw-r--r--  net/dsa/tag_edsa.c                 1
-rw-r--r--  net/dsa/tag_trailer.c              1
-rw-r--r--  net/ipv4/proc.c                   58
-rw-r--r--  net/ipv6/ip6mr.c                   9
-rw-r--r--  net/key/af_key.c                   1
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c    3
-rw-r--r--  net/unix/af_unix.c                31
-rw-r--r--  net/unix/garbage.c                49
12 files changed, 174 insertions, 56 deletions
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 0feefa4e1a4..3628e0a81b4 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -314,7 +314,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
dev_info->ingress_priority_map[6],
dev_info->ingress_priority_map[7]);
- seq_printf(seq, "EGRESSS priority Mappings: ");
+ seq_printf(seq, " EGRESS priority mappings: ");
for (i = 0; i < 16; i++) {
const struct vlan_priority_tci_mapping *mp
= dev_info->egress_priority_map[i];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f24a4951008..267185a848f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -486,8 +486,8 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
shinfo->frag_list = NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
- skb_reset_tail_pointer(skb);
skb->data = skb->head + NET_SKB_PAD;
+ skb_reset_tail_pointer(skb);
return 1;
}
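
The reordering above matters because skb_reset_tail_pointer() derives the tail pointer from skb->data, so it has to run after skb->data has been rewritten to skb->head + NET_SKB_PAD. A minimal sketch of the helper, roughly as it appears in include/linux/skbuff.h:

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data - skb->head;      /* tail kept as an offset */
}
#else
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data;                  /* tail kept as a pointer */
}
#endif

With the old order, the tail was computed from the stale value skb->data held before the NET_SKB_PAD adjustment, leaving data and tail out of sync on recycled skbs.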
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7384bad8165..a3a410d20da 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/phy.h>
#include "dsa_priv.h"
@@ -49,11 +50,57 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
/* slave device handling ****************************************************/
static int dsa_slave_open(struct net_device *dev)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct net_device *master = p->parent->master_netdev;
+ int err;
+
+ if (!(master->flags & IFF_UP))
+ return -ENETDOWN;
+
+ if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
+ err = dev_unicast_add(master, dev->dev_addr, ETH_ALEN);
+ if (err < 0)
+ goto out;
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(master, 1);
+ if (err < 0)
+ goto del_unicast;
+ }
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(master, 1);
+ if (err < 0)
+ goto clear_allmulti;
+ }
+
return 0;
+
+clear_allmulti:
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(master, -1);
+del_unicast:
+ if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+out:
+ return err;
}
static int dsa_slave_close(struct net_device *dev)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct net_device *master = p->parent->master_netdev;
+
+ dev_mc_unsync(master, dev);
+ dev_unicast_unsync(master, dev);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(master, -1);
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(master, -1);
+
+ if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+
return 0;
}
@@ -77,9 +124,30 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
dev_unicast_sync(master, dev);
}
-static int dsa_slave_set_mac_address(struct net_device *dev, void *addr)
+static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
- memcpy(dev->dev_addr, addr + 2, 6);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct net_device *master = p->parent->master_netdev;
+ struct sockaddr *addr = a;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!(dev->flags & IFF_UP))
+ goto out;
+
+ if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
+ err = dev_unicast_add(master, addr->sa_data, ETH_ALEN);
+ if (err < 0)
+ return err;
+ }
+
+ if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+ dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+
+out:
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
return 0;
}
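
A note on the compare_ether_addr() guards used in dsa_slave_open(), dsa_slave_close() and dsa_slave_set_mac_address(): the helper returns 0 when the two MAC addresses are equal and non-zero when they differ, so the guards read as "only when the slave's MAC differs from the master's", which is the only case where the master interface needs an extra unicast filter entry. A minimal restatement of that predicate (a sketch, not part of the patch):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* true when the master must carry a unicast filter entry for the slave */
static bool slave_needs_unicast_entry(const struct net_device *slave,
                                      const struct net_device *master)
{
        /* compare_ether_addr() returns 0 for equal addresses */
        return compare_ether_addr(slave->dev_addr, master->dev_addr) != 0;
}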
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index ff55823a653..f99a019b939 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -159,6 +159,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
skb->dev = ds->ports[source_port];
skb_push(skb, ETH_HLEN);
+ skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
skb->dev->stats.rx_packets++;
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 24b1c76fa7a..328ec957f78 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -178,6 +178,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
skb->dev = ds->ports[source_port];
skb_push(skb, ETH_HLEN);
+ skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
skb->dev->stats.rx_packets++;
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 3bfd2e55877..b59132878ad 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -95,6 +95,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
skb->dev = ds->ports[source_port];
skb_push(skb, ETH_HLEN);
+ skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
skb->dev->stats.rx_packets++;
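
The same one-line change appears in all three taggers because the frame has already been through eth_type_trans() once, on the master interface, before the DSA packet_type handler runs. If the destination MAC is the slave's address rather than the master's, that first pass leaves skb->pkt_type set to PACKET_OTHERHOST, and the second eth_type_trans() call against the slave device never clears it; it only ever sets PACKET_OTHERHOST, as the sketch of its unicast branch below illustrates, so such frames would later be dropped by the upper layers. Forcing PACKET_HOST first avoids that. (Sketch only, not the kernel source.)

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/skbuff.h>

/* note: no else branch ever restores PACKET_HOST */
static void classify_unicast_sketch(struct sk_buff *skb,
                                    const struct ethhdr *eth,
                                    const struct net_device *dev)
{
        if (compare_ether_addr(eth->h_dest, dev->dev_addr))
                skb->pkt_type = PACKET_OTHERHOST;
}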
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8f5a403f6f6..a631a1f110c 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -237,43 +237,45 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_SENTINEL
};
+static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals,
+ unsigned short *type, int count)
+{
+ int j;
+
+ if (count) {
+ seq_printf(seq, "\nIcmpMsg:");
+ for (j = 0; j < count; ++j)
+ seq_printf(seq, " %sType%u",
+ type[j] & 0x100 ? "Out" : "In",
+ type[j] & 0xff);
+ seq_printf(seq, "\nIcmpMsg:");
+ for (j = 0; j < count; ++j)
+ seq_printf(seq, " %lu", vals[j]);
+ }
+}
+
static void icmpmsg_put(struct seq_file *seq)
{
#define PERLINE 16
- int j, i, count;
- static int out[PERLINE];
+ int i, count;
+ unsigned short type[PERLINE];
+ unsigned long vals[PERLINE], val;
struct net *net = seq->private;
count = 0;
for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
-
- if (snmp_fold_field((void **) net->mib.icmpmsg_statistics, i))
- out[count++] = i;
- if (count < PERLINE)
- continue;
-
- seq_printf(seq, "\nIcmpMsg:");
- for (j = 0; j < PERLINE; ++j)
- seq_printf(seq, " %sType%u", i & 0x100 ? "Out" : "In",
- i & 0xff);
- seq_printf(seq, "\nIcmpMsg: ");
- for (j = 0; j < PERLINE; ++j)
- seq_printf(seq, " %lu",
- snmp_fold_field((void **) net->mib.icmpmsg_statistics,
- out[j]));
- seq_putc(seq, '\n');
- }
- if (count) {
- seq_printf(seq, "\nIcmpMsg:");
- for (j = 0; j < count; ++j)
- seq_printf(seq, " %sType%u", out[j] & 0x100 ? "Out" :
- "In", out[j] & 0xff);
- seq_printf(seq, "\nIcmpMsg:");
- for (j = 0; j < count; ++j)
- seq_printf(seq, " %lu", snmp_fold_field((void **)
- net->mib.icmpmsg_statistics, out[j]));
+ val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i);
+ if (val) {
+ type[count] = i;
+ vals[count++] = val;
+ }
+ if (count == PERLINE) {
+ icmpmsg_put_line(seq, vals, type, count);
+ count = 0;
+ }
}
+ icmpmsg_put_line(seq, vals, type, count);
#undef PERLINE
}
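
Besides factoring out icmpmsg_put_line(), the rewrite fixes the removed per-PERLINE branch, which printed its headers from the loop index i instead of out[j] and never reset count after flushing a full line. A standalone userspace-C sketch of the same accumulate-and-flush pattern, using a hypothetical counter array rather than the kernel SNMP fields:

#include <stdio.h>

#define PERLINE 16

static void flush_line(const unsigned long *vals,
                       const unsigned short *type, int count)
{
        int j;

        if (!count)
                return;
        for (j = 0; j < count; j++)
                printf(" Type%hu", type[j]);
        printf("\n");
        for (j = 0; j < count; j++)
                printf(" %lu", vals[j]);
        printf("\n");
}

int main(void)
{
        unsigned long counters[64] = { [3] = 7, [11] = 2, [42] = 5 };
        unsigned short type[PERLINE];
        unsigned long vals[PERLINE];
        int i, count = 0;

        for (i = 0; i < 64; i++) {
                if (!counters[i])
                        continue;
                type[count] = i;                /* remember the non-zero entry */
                vals[count++] = counters[i];
                if (count == PERLINE) {
                        flush_line(vals, type, count);
                        count = 0;              /* start a fresh batch */
                }
        }
        flush_line(vals, type, count);          /* emit the remainder */
        return 0;
}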
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index c491fb98a5e..b17377d6f26 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -980,14 +980,15 @@ int __init ip6_mr_init(void)
goto proc_cache_fail;
#endif
return 0;
-reg_notif_fail:
- kmem_cache_destroy(mrt_cachep);
#ifdef CONFIG_PROC_FS
-proc_vif_fail:
- unregister_netdevice_notifier(&ip6_mr_notifier);
proc_cache_fail:
proc_net_remove(&init_net, "ip6_mr_vif");
+proc_vif_fail:
+ unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
+reg_notif_fail:
+ del_timer(&ipmr_expire_timer);
+ kmem_cache_destroy(mrt_cachep);
return err;
}
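
The relabelling restores the usual convention for init-time error handling: each failure jumps to a label that undoes only the steps already completed, in the reverse order of setup, and the reg_notif_fail path now also stops ipmr_expire_timer before the slab cache is destroyed. A hedged sketch of that convention, using hypothetical setup/teardown helpers rather than kernel APIs:

static int setup_cache(void)    { return 0; }   /* hypothetical step A */
static int setup_notifier(void) { return 0; }   /* hypothetical step B */
static int setup_proc(void)     { return 0; }   /* hypothetical step C */
static void undo_notifier(void) { }
static void undo_cache(void)    { }

static int init_sketch(void)
{
        int err;

        err = setup_cache();
        if (err)
                goto out;
        err = setup_notifier();
        if (err)
                goto err_cache;
        err = setup_proc();
        if (err)
                goto err_notifier;
        return 0;

err_notifier:                   /* C failed: undo B, then A */
        undo_notifier();
err_cache:                      /* B failed: undo A only */
        undo_cache();
out:
        return err;
}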
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3440a4637f0..5b22e011653 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3188,6 +3188,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
return xp;
out:
+ xp->walk.dead = 1;
xfrm_policy_destroy(xp);
return NULL;
}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 2f367219073..425ab144f15 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -711,7 +711,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
iph = ipv6_hdr(skb);
iph->version = 6;
iph->nexthdr = IPPROTO_IPV6;
- iph->payload_len = old_iph->payload_len + sizeof(old_iph);
+ iph->payload_len = old_iph->payload_len;
+ be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
iph->priority = old_iph->priority;
memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
iph->daddr = rt->rt6i_dst.addr;
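
Two bugs are fixed in that one line: sizeof(old_iph) was the size of a pointer, not of the IPv6 header it points to, and the addition mixed a host-endian constant into the big-endian payload_len field. be16_add_cpu() takes care of the byte order; a sketch of what it amounts to (roughly the helper from include/linux/byteorder/generic.h):

#include <linux/types.h>
#include <asm/byteorder.h>

/* convert to host order, add, convert back to network (big-endian) order */
static inline void be16_add_cpu_sketch(__be16 *var, u16 val)
{
        *var = cpu_to_be16(be16_to_cpu(*var) + val);
}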
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 7d2e4f8f817..4a39771d037 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1302,14 +1302,23 @@ static void unix_destruct_fds(struct sk_buff *skb)
sock_wfree(skb);
}
-static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
+
+ /*
+ * Need to duplicate file references for the sake of garbage
+ * collection. Otherwise a socket in the fps might become a
+ * candidate for GC while the skb is not yet queued.
+ */
+ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+ if (!UNIXCB(skb).fp)
+ return -ENOMEM;
+
for (i=scm->fp->count-1; i>=0; i--)
unix_inflight(scm->fp->fp[i]);
- UNIXCB(skb).fp = scm->fp;
skb->destructor = unix_destruct_fds;
- scm->fp = NULL;
+ return 0;
}
/*
@@ -1368,8 +1377,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out;
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp)
- unix_attach_fds(siocb->scm, skb);
+ if (siocb->scm->fp) {
+ err = unix_attach_fds(siocb->scm, skb);
+ if (err)
+ goto out_free;
+ }
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
@@ -1538,8 +1550,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
size = min_t(int, size, skb_tailroom(skb));
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp)
- unix_attach_fds(siocb->scm, skb);
+ if (siocb->scm->fp) {
+ err = unix_attach_fds(siocb->scm, skb);
+ if (err) {
+ kfree_skb(skb);
+ goto out_err;
+ }
+ }
if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
kfree_skb(skb);
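
The comment added to unix_attach_fds() gives the reason for the scm_fp_dup() call: the skb now owns its own set of file references, so none of the passed sockets can appear unreferenced to the garbage collector in the window before the skb is queued. The cost is that attaching can now fail with -ENOMEM, which is why both the datagram and stream sendmsg paths gain an error check. A hedged sketch of what the duplication amounts to (not the net/core/scm.c source):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/scm.h>

/* copy the fd list and take an extra reference on every file in it */
static struct scm_fp_list *scm_fp_dup_sketch(struct scm_fp_list *fpl)
{
        struct scm_fp_list *new_fpl;
        int i;

        if (!fpl)
                return NULL;

        new_fpl = kmemdup(fpl, sizeof(*fpl), GFP_KERNEL);
        if (new_fpl) {
                for (i = 0; i < fpl->count; i++)
                        get_file(fpl->fp[i]);
        }
        return new_fpl;
}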
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 00734e22ec1..5a0061d6b9b 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
*/
struct sock *sk = unix_get_socket(*fp++);
if (sk) {
- hit = true;
- func(unix_sk(sk));
+ struct unix_sock *u = unix_sk(sk);
+
+ /*
+ * Ignore non-candidates, they could
+ * have been added to the queues after
+ * starting the garbage collection
+ */
+ if (u->gc_candidate) {
+ hit = true;
+ func(u);
+ }
}
}
if (hit && hitlist != NULL) {
@@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
/*
- * If this is still a candidate, move it to the end of the
- * list, so that it's checked even if it was already passed
- * over
+ * If this still might be part of a cycle, move it to the end
+ * of the list, so that it's checked even if it was already
+ * passed over
*/
- if (u->gc_candidate)
+ if (u->gc_maybe_cycle)
list_move_tail(&u->link, &gc_candidates);
}
@@ -267,6 +276,7 @@ void unix_gc(void)
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
+ LIST_HEAD(not_cycle_list);
spin_lock(&unix_gc_lock);
@@ -282,10 +292,14 @@ void unix_gc(void)
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
- * reference. This also means, that since there are no
- * possible receivers, the receive queues of these sockets are
- * static during the GC, even though the dequeue is done
- * before the detach without atomicity guarantees.
+ * reference. Since there are no possible receivers, all
+ * buffers currently on the candidates' queues stay there
+ * during the garbage collection.
+ *
+ * We also know that no new candidate can be added onto the
+ * receive queues. Other, non candidate sockets _can_ be
+ * added to queue, so we must make sure only to touch
+ * candidates.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
@@ -299,6 +313,7 @@ void unix_gc(void)
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
u->gc_candidate = 1;
+ u->gc_maybe_cycle = 1;
}
}
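
As the comment above spells out, a socket becomes a GC candidate only when every reference the rest of the system holds on it is itself sitting in an in-flight SCM_RIGHTS message, i.e. when the file's reference count is fully accounted for by the inflight counter. A minimal restatement of that test (sketch, not the kernel code):

#include <linux/types.h>

/* candidate: reachable only through messages that are still in flight */
static bool gc_candidate_sketch(long file_refs, long inflight_refs)
{
        return inflight_refs >= 1 && file_refs == inflight_refs;
}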
@@ -325,14 +340,24 @@ void unix_gc(void)
list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) {
- list_move_tail(&u->link, &gc_inflight_list);
- u->gc_candidate = 0;
+ list_move_tail(&u->link, &not_cycle_list);
+ u->gc_maybe_cycle = 0;
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/*
+ * not_cycle_list contains those sockets which do not make up a
+ * cycle. Restore these to the inflight list.
+ */
+ while (!list_empty(&not_cycle_list)) {
+ u = list_entry(not_cycle_list.next, struct unix_sock, link);
+ u->gc_candidate = 0;
+ list_move_tail(&u->link, &gc_inflight_list);
+ }
+
+ /*
* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).