author     David S. Miller <davem@davemloft.net>  2014-10-08 16:22:22 -0400
committer  David S. Miller <davem@davemloft.net>  2014-10-08 16:22:22 -0400
commit     64b1f00a0830e1c53874067273a096b228d83d36 (patch)
tree       dd547b0f1d431d0995b8eaa711cedb92399f31fe /net
parent     16b99a4f6644d58c94acb4b4253e84049de588c5 (diff)
parent     5301e3e117d88ef0967ce278912e54757f1a31a2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netfilter.c   11
-rw-r--r--  net/bridge/br_private.h      4
-rw-r--r--  net/ipv6/ip6_gre.c           4
-rw-r--r--  net/rds/send.c              11
-rw-r--r--  net/rds/tcp_connect.c        5
-rw-r--r--  net/rds/threads.c            3
-rw-r--r--  net/sched/cls_api.c          1
-rw-r--r--  net/sched/ematch.c           5
-rw-r--r--  net/sctp/sm_statefuns.c     19
9 files changed, 49 insertions, 14 deletions
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fa1270cc5086..1bada53bb195 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -316,6 +316,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
ETH_HLEN-ETH_ALEN);
/* tell br_dev_xmit to continue with forwarding */
nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+ /* FIXME Need to refragment */
ret = neigh->output(neigh, skb);
}
neigh_release(neigh);
@@ -371,6 +372,10 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct rtable *rt;
int err;
+ int frag_max_size;
+
+ frag_max_size = IPCB(skb)->frag_max_size;
+ BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
@@ -775,13 +780,19 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
int ret;
+ int frag_max_size;
+ /* This is wrong! We should preserve the original fragment
+ * boundaries by preserving frag_list rather than refragmenting.
+ */
if (skb->protocol == htons(ETH_P_IP) &&
skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
!skb_is_gso(skb)) {
+ frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
if (br_parse_ip_options(skb))
/* Drop invalid packet */
return NF_DROP;
+ IPCB(skb)->frag_max_size = frag_max_size;
ret = ip_fragment(skb, br_dev_queue_push_xmit);
} else
ret = br_dev_queue_push_xmit(skb);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d8cbaa694227..4d783d071305 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -306,10 +306,14 @@ struct net_bridge
struct br_input_skb_cb {
struct net_device *brdev;
+
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
int igmp;
int mrouters_only;
#endif
+
+ u16 frag_max_size;
+
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
bool vlan_filtered;
#endif
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index de3b1c86b8d3..12c3c8ef3849 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -786,7 +786,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
encap_limit = t->parms.encap_limit;
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_IPIP;
+ fl6.flowi6_proto = IPPROTO_GRE;
dsfield = ipv4_get_dsfield(iph);
@@ -836,7 +836,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
encap_limit = t->parms.encap_limit;
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_IPV6;
+ fl6.flowi6_proto = IPPROTO_GRE;
dsfield = ipv6_get_dsfield(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
diff --git a/net/rds/send.c b/net/rds/send.c
index 23718160d71e..0a64541020b0 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -593,8 +593,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
sock_put(rds_rs_to_sk(rs));
}
rs = rm->m_rs;
- sock_hold(rds_rs_to_sk(rs));
+ if (rs)
+ sock_hold(rds_rs_to_sk(rs));
}
+ if (!rs)
+ goto unlock_and_drop;
spin_lock(&rs->rs_lock);
if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
@@ -638,9 +641,6 @@ unlock_and_drop:
* queue. This means that in the TCP case, the message may not have been
* assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
* checks the RDS_MSG_HAS_ACK_SEQ bit.
- *
- * XXX It's not clear to me how this is safely serialized with socket
- * destruction. Maybe it should bail if it sees SOCK_DEAD.
*/
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
is_acked_func is_acked)
@@ -711,6 +711,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
*/
if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
spin_unlock_irqrestore(&conn->c_lock, flags);
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
+ rm->m_rs = NULL;
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
continue;
}
list_del_init(&rm->m_conn_item);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index a65ee78db0c5..f9f564a6c960 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -106,11 +106,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
rds_tcp_set_callbacks(sock, conn);
ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
O_NONBLOCK);
- sock = NULL;
rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
ret = 0;
+ if (ret == 0)
+ sock = NULL;
+ else
+ rds_tcp_restore_callbacks(sock, conn->c_transport_data);
out:
if (sock)
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 65eaefcab241..dc2402e871fd 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -78,8 +78,7 @@ void rds_connect_complete(struct rds_connection *conn)
"current state is %d\n",
__func__,
atomic_read(&conn->c_state));
- atomic_set(&conn->c_state, RDS_CONN_ERROR);
- queue_work(rds_wq, &conn->c_down_w);
+ rds_conn_drop(conn);
return;
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 77147c8c4acc..aad6a679fb13 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -549,6 +549,7 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
tcf_tree_lock(tp);
list_splice_init(&dst->actions, &tmp);
list_splice(&src->actions, &dst->actions);
+ dst->type = src->type;
tcf_tree_unlock(tp);
tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
#endif
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 8250c36543d8..6742200b1307 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -528,9 +528,10 @@ pop_stack:
match_idx = stack[--stackp];
cur_match = tcf_em_get_match(tree, match_idx);
+ if (tcf_em_is_inverted(cur_match))
+ res = !res;
+
if (tcf_em_early_end(cur_match, res)) {
- if (tcf_em_is_inverted(cur_match))
- res = !res;
goto pop_stack;
} else {
match_idx++;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d3f1ea460c50..c8f606324134 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1775,9 +1775,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_ESTABLISHED));
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ if (sctp_state(asoc, SHUTDOWN_PENDING) &&
+ (sctp_sstate(asoc->base.sk, CLOSING) ||
+ sock_flag(asoc->base.sk, SOCK_DEAD))) {
+ /* if we're currently in SHUTDOWN_PENDING, but the socket
+ * has been closed by user, don't transition to ESTABLISHED.
+ * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
+ SCTP_ST_CHUNK(0), NULL,
+ commands);
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ }
return SCTP_DISPOSITION_CONSUME;
nomem_ev: