Diffstat (limited to 'net')
-rw-r--r--  net/802/mrp.c | 4
-rw-r--r--  net/9p/client.c | 55
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 86
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 4
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 13
-rw-r--r--  net/batman-adv/main.c | 19
-rw-r--r--  net/batman-adv/network-coding.c | 8
-rw-r--r--  net/batman-adv/originator.c | 16
-rw-r--r--  net/batman-adv/originator.h | 1
-rw-r--r--  net/batman-adv/soft-interface.c | 1
-rw-r--r--  net/batman-adv/sysfs.c | 5
-rw-r--r--  net/batman-adv/translation-table.c | 7
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 11
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 18
-rw-r--r--  net/ceph/osd_client.c | 7
-rw-r--r--  net/compat.c | 13
-rw-r--r--  net/core/dev_addr_lists.c | 17
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/core/iovec.c | 50
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/core/sock.c | 18
-rw-r--r--  net/core/sock_diag.c | 9
-rw-r--r--  net/ipv4/ip_gre.c | 3
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ip_tunnel.c | 6
-rw-r--r--  net/ipv4/ip_vti.c | 3
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 19
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/tcp.c | 29
-rw-r--r--  net/ipv4/tcp_input.c | 23
-rw-r--r--  net/ipv4/tcp_output.c | 10
-rw-r--r--  net/ipv6/addrconf.c | 6
-rw-r--r--  net/ipv6/ip6_gre.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/netfilter.c | 7
-rw-r--r--  net/ipv6/proc.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 12
-rw-r--r--  net/ipv6/udp.c | 13
-rw-r--r--  net/ipv6/udp_impl.h | 2
-rw-r--r--  net/ipv6/udp_offload.c | 20
-rw-r--r--  net/ipv6/udplite.c | 2
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/irda/irlap_frame.c | 2
-rw-r--r--  net/key/af_key.c | 4
-rw-r--r--  net/l2tp/l2tp_ppp.c | 6
-rw-r--r--  net/netfilter/core.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 35
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_sh.c | 2
-rw-r--r--  net/netfilter/nf_log.c | 7
-rw-r--r--  net/netfilter/nfnetlink_acct.c | 7
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 7
-rw-r--r--  net/netfilter/nfnetlink_log.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 8
-rw-r--r--  net/netfilter/xt_LOG.c | 15
-rw-r--r--  net/netfilter/xt_NFLOG.c | 3
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 6
-rw-r--r--  net/netfilter/xt_TCPOPTSTRIP.c | 17
-rw-r--r--  net/netfilter/xt_addrtype.c | 27
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 69
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/packet/af_packet.c | 5
-rw-r--r--  net/sched/act_police.c | 8
-rw-r--r--  net/sched/sch_api.c | 11
-rw-r--r--  net/sched/sch_generic.c | 8
-rw-r--r--  net/sched/sch_htb.c | 42
-rw-r--r--  net/sched/sch_tbf.c | 8
-rw-r--r--  net/sctp/outqueue.c | 6
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/socket.c | 67
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 62
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c | 58
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 8
-rw-r--r--  net/sunrpc/netns.h | 4
-rw-r--r--  net/sunrpc/rpc_pipe.c | 5
-rw-r--r--  net/sunrpc/sched.c | 8
-rw-r--r--  net/sunrpc/svcauth_unix.c | 12
-rw-r--r--  net/xfrm/xfrm_output.c | 1
-rw-r--r--  net/xfrm/xfrm_policy.c | 3
-rw-r--r--  net/xfrm/xfrm_user.c | 2
80 files changed, 708 insertions, 386 deletions
diff --git a/net/802/mrp.c b/net/802/mrp.c
index e085bcc754f..1eb05d80b07 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -871,10 +871,10 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
*/
del_timer_sync(&app->join_timer);
- spin_lock(&app->lock);
+ spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_pdu_queue(app);
- spin_unlock(&app->lock);
+ spin_unlock_bh(&app->lock);
mrp_queue_xmit(app);
diff --git a/net/9p/client.c b/net/9p/client.c
index 8eb75425e6e..addc116cecf 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -562,36 +562,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
if (!p9_is_proto_dotl(c)) {
/* Error is reported in string format */
- uint16_t len;
- /* 7 = header size for RERROR, 2 is the size of string len; */
- int inline_len = in_hdrlen - (7 + 2);
+ int len;
+ /* 7 = header size for RERROR; */
+ int inline_len = in_hdrlen - 7;
- /* Read the size of error string */
- err = p9pdu_readf(req->rc, c->proto_version, "w", &len);
- if (err)
- goto out_err;
-
- ename = kmalloc(len + 1, GFP_NOFS);
- if (!ename) {
- err = -ENOMEM;
+ len = req->rc->size - req->rc->offset;
+ if (len > (P9_ZC_HDR_SZ - 7)) {
+ err = -EFAULT;
goto out_err;
}
- if (len <= inline_len) {
- /* We have error in protocol buffer itself */
- if (pdu_read(req->rc, ename, len)) {
- err = -EFAULT;
- goto out_free;
- }
- } else {
- /*
- * Part of the data is in user space buffer.
- */
- if (pdu_read(req->rc, ename, inline_len)) {
- err = -EFAULT;
- goto out_free;
-
- }
+ ename = &req->rc->sdata[req->rc->offset];
+ if (len > inline_len) {
+ /* We have error in external buffer */
if (kern_buf) {
memcpy(ename + inline_len, uidata,
len - inline_len);
@@ -600,19 +583,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
uidata, len - inline_len);
if (err) {
err = -EFAULT;
- goto out_free;
+ goto out_err;
}
}
}
- ename[len] = 0;
- if (p9_is_proto_dotu(c)) {
- /* For dotu we also have error code */
- err = p9pdu_readf(req->rc,
- c->proto_version, "d", &ecode);
- if (err)
- goto out_free;
+ ename = NULL;
+ err = p9pdu_readf(req->rc, c->proto_version, "s?d",
+ &ename, &ecode);
+ if (err)
+ goto out_err;
+
+ if (p9_is_proto_dotu(c))
err = -ecode;
- }
+
if (!err || !IS_ERR_VALUE(err)) {
err = p9_errstr2errno(ename, strlen(ename));
@@ -628,8 +611,6 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
}
return err;
-out_free:
- kfree(ename);
out_err:
p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
return err;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 071f288b77a..f680ee10187 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -29,6 +29,21 @@
#include "bat_algo.h"
#include "network-coding.h"
+/**
+ * batadv_dup_status - duplicate status
+ * @BATADV_NO_DUP: the packet is no duplicate
+ * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
+ * neighbor)
+ * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
+ * @BATADV_PROTECTED: originator is currently protected (after reboot)
+ */
+enum batadv_dup_status {
+ BATADV_NO_DUP = 0,
+ BATADV_ORIG_DUP,
+ BATADV_NEIGH_DUP,
+ BATADV_PROTECTED,
+};
+
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
const uint8_t *neigh_addr,
@@ -650,7 +665,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
const struct batadv_ogm_packet *batadv_ogm_packet,
struct batadv_hard_iface *if_incoming,
const unsigned char *tt_buff,
- int is_duplicate)
+ enum batadv_dup_status dup_status)
{
struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
struct batadv_neigh_node *router = NULL;
@@ -676,7 +691,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
continue;
}
- if (is_duplicate)
+ if (dup_status != BATADV_NO_DUP)
continue;
spin_lock_bh(&tmp_neigh_node->lq_update_lock);
@@ -718,7 +733,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
spin_unlock_bh(&neigh_node->lq_update_lock);
- if (!is_duplicate) {
+ if (dup_status == BATADV_NO_DUP) {
orig_node->last_ttl = batadv_ogm_packet->header.ttl;
neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
}
@@ -902,15 +917,16 @@ out:
return ret;
}
-/* processes a batman packet for all interfaces, adjusts the sequence number and
- * finds out whether it is a duplicate.
- * returns:
- * 1 the packet is a duplicate
- * 0 the packet has not yet been received
- * -1 the packet is old and has been received while the seqno window
- * was protected. Caller should drop it.
+/**
+ * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces,
+ * adjust the sequence number and find out whether it is a duplicate
+ * @ethhdr: ethernet header of the packet
+ * @batadv_ogm_packet: OGM packet to be considered
+ * @if_incoming: interface on which the OGM packet was received
+ *
+ * Returns duplicate status as enum batadv_dup_status
*/
-static int
+static enum batadv_dup_status
batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
const struct batadv_ogm_packet *batadv_ogm_packet,
const struct batadv_hard_iface *if_incoming)
@@ -918,17 +934,18 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_orig_node *orig_node;
struct batadv_neigh_node *tmp_neigh_node;
- int is_duplicate = 0;
+ int is_dup;
int32_t seq_diff;
int need_update = 0;
- int set_mark, ret = -1;
+ int set_mark;
+ enum batadv_dup_status ret = BATADV_NO_DUP;
uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
uint8_t *neigh_addr;
uint8_t packet_count;
orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
- return 0;
+ return BATADV_NO_DUP;
spin_lock_bh(&orig_node->ogm_cnt_lock);
seq_diff = seqno - orig_node->last_real_seqno;
@@ -936,22 +953,29 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
/* signalize caller that the packet is to be dropped. */
if (!hlist_empty(&orig_node->neigh_list) &&
batadv_window_protected(bat_priv, seq_diff,
- &orig_node->batman_seqno_reset))
+ &orig_node->batman_seqno_reset)) {
+ ret = BATADV_PROTECTED;
goto out;
+ }
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_node->neigh_list, list) {
- is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
- orig_node->last_real_seqno,
- seqno);
-
neigh_addr = tmp_neigh_node->addr;
+ is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
+ orig_node->last_real_seqno,
+ seqno);
+
if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
- tmp_neigh_node->if_incoming == if_incoming)
+ tmp_neigh_node->if_incoming == if_incoming) {
set_mark = 1;
- else
+ if (is_dup)
+ ret = BATADV_NEIGH_DUP;
+ } else {
set_mark = 0;
+ if (is_dup && (ret != BATADV_NEIGH_DUP))
+ ret = BATADV_ORIG_DUP;
+ }
/* if the window moved, set the update flag. */
need_update |= batadv_bit_get_packet(bat_priv,
@@ -971,8 +995,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
orig_node->last_real_seqno = seqno;
}
- ret = is_duplicate;
-
out:
spin_unlock_bh(&orig_node->ogm_cnt_lock);
batadv_orig_node_free_ref(orig_node);
@@ -994,7 +1016,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
int is_broadcast = 0, is_bidirect;
bool is_single_hop_neigh = false;
bool is_from_best_next_hop = false;
- int is_duplicate, sameseq, simlar_ttl;
+ int sameseq, similar_ttl;
+ enum batadv_dup_status dup_status;
uint32_t if_incoming_seqno;
uint8_t *prev_sender;
@@ -1138,10 +1161,10 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
if (!orig_node)
return;
- is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
- if_incoming);
+ dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
+ if_incoming);
- if (is_duplicate == -1) {
+ if (dup_status == BATADV_PROTECTED) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: packet within seqno protection time (sender: %pM)\n",
ethhdr->h_source);
@@ -1211,11 +1234,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
* seqno and similar ttl as the non-duplicate
*/
sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
- simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
- if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl)))
+ similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+ if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
+ (sameseq && similar_ttl)))
batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
batadv_ogm_packet, if_incoming,
- tt_buff, is_duplicate);
+ tt_buff, dup_status);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
@@ -1236,7 +1260,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
goto out_neigh;
}
- if (is_duplicate) {
+ if (dup_status == BATADV_NEIGH_DUP) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: duplicate packet received\n");
goto out_neigh;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 379061c7254..de27b3175cf 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1067,6 +1067,10 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
bat_priv->bla.claim_dest.group = group;
+ /* purge everything when bridge loop avoidance is turned off */
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ oldif = NULL;
+
if (!oldif) {
batadv_bla_purge_claims(bat_priv, NULL, 1);
batadv_bla_purge_backbone_gw(bat_priv, 1);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 8e15d966d9b..239992021b1 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -837,6 +837,19 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
if (dat_entry) {
+ /* If the ARP request is destined for a local client the local
+ * client will answer itself. DAT would only generate a
+ * duplicate packet.
+ *
+ * Moreover, if the soft-interface is enslaved into a bridge, an
+ * additional DAT answer may trigger kernel warnings about
+ * a packet coming from the wrong port.
+ */
+ if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+ ret = true;
+ goto out;
+ }
+
skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
bat_priv->soft_iface, ip_dst, hw_src,
dat_entry->mac_addr, hw_src);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 3e30a0f1b90..51aafd669cb 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -163,16 +163,25 @@ void batadv_mesh_free(struct net_device *soft_iface)
batadv_vis_quit(bat_priv);
batadv_gw_node_purge(bat_priv);
- batadv_originator_free(bat_priv);
batadv_nc_free(bat_priv);
+ batadv_dat_free(bat_priv);
+ batadv_bla_free(bat_priv);
+ /* Free the TT and the originator tables only after having terminated
+ * all the other depending components which may use these structures for
+ * their purposes.
+ */
batadv_tt_free(bat_priv);
- batadv_bla_free(bat_priv);
-
- batadv_dat_free(bat_priv);
+ /* Since the originator table clean up routine is accessing the TT
+ * tables as well, it has to be invoked after the TT tables have been
+ * freed and marked as empty. This ensures that no cleanup RCU callbacks
+ * accessing the TT data are scheduled for later execution.
+ */
+ batadv_originator_free(bat_priv);
free_percpu(bat_priv->bat_counters);
+ bat_priv->bat_counters = NULL;
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
@@ -475,7 +484,7 @@ static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
char *algo_name = (char *)val;
size_t name_len = strlen(algo_name);
- if (algo_name[name_len - 1] == '\n')
+ if (name_len > 0 && algo_name[name_len - 1] == '\n')
algo_name[name_len - 1] = '\0';
bat_algo_ops = batadv_algo_get(algo_name);
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index f7c54305a91..e84629ece9b 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1514,6 +1514,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct ethhdr *ethhdr, ethhdr_tmp;
uint8_t *orig_dest, ttl, ttvn;
unsigned int coding_len;
+ int err;
/* Save headers temporarily */
memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
@@ -1568,8 +1569,11 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
coding_len);
/* Resize decoded skb if decoded with larger packet */
- if (nc_packet->skb->len > coding_len + h_size)
- pskb_trim_rcsum(skb, coding_len + h_size);
+ if (nc_packet->skb->len > coding_len + h_size) {
+ err = pskb_trim_rcsum(skb, coding_len + h_size);
+ if (err)
+ return NULL;
+ }
/* Create decoded unicast packet */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 2f345254663..fad1a2093e1 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -156,12 +156,28 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
kfree(orig_node);
}
+/**
+ * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
+ * schedule an rcu callback for freeing it
+ * @orig_node: the orig node to free
+ */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
if (atomic_dec_and_test(&orig_node->refcount))
call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
+/**
+ * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
+ * possibly free it (without rcu callback)
+ * @orig_node: the orig node to free
+ */
+void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
+{
+ if (atomic_dec_and_test(&orig_node->refcount))
+ batadv_orig_node_free_rcu(&orig_node->rcu);
+}
+
void batadv_originator_free(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 7df48fa7669..734e5a3d8a5 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -26,6 +26,7 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
void batadv_originator_free(struct batadv_priv *bat_priv);
void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
+void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
const uint8_t *addr);
struct batadv_neigh_node *
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6f20d339e33..819dfb006cd 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -505,6 +505,7 @@ unreg_debugfs:
batadv_debugfs_del_meshif(dev);
free_bat_counters:
free_percpu(bat_priv->bat_counters);
+ bat_priv->bat_counters = NULL;
return ret;
}
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 15a22efa9a6..929e304dacb 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -582,10 +582,7 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
(strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
goto out;
- if (!rtnl_trylock()) {
- ret = -ERESTARTSYS;
- goto out;
- }
+ rtnl_lock();
if (status_tmp == BATADV_IF_NOT_IN_USE) {
batadv_hardif_disable_interface(hard_iface,
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5e89deeb954..9e874857584 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -144,7 +144,12 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
struct batadv_tt_orig_list_entry *orig_entry;
orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
- batadv_orig_node_free_ref(orig_entry->orig_node);
+
+ /* We are in an rcu callback here, therefore we cannot use
+ * batadv_orig_node_free_ref() and its call_rcu():
+ * An rcu_barrier() wouldn't wait for that to finish
+ */
+ batadv_orig_node_free_ref_now(orig_entry->orig_node);
kfree(orig_entry);
}
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 9878eb8204c..19c37a4929b 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -72,13 +72,12 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
}
static void
-ebt_log_packet(u_int8_t pf, unsigned int hooknum,
- const struct sk_buff *skb, const struct net_device *in,
- const struct net_device *out, const struct nf_loginfo *loginfo,
- const char *prefix)
+ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
+ const struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const struct nf_loginfo *loginfo,
+ const char *prefix)
{
unsigned int bitmask;
- struct net *net = dev_net(in ? in : out);
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
@@ -191,7 +190,7 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
par->in, par->out, &li, "%s", info->prefix);
else
- ebt_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in,
+ ebt_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, par->in,
par->out, &li, info->prefix);
return EBT_CONTINUE;
}
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index fc1905c5141..df0364aa12d 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -131,14 +131,16 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
return skb;
}
-static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
- const struct net_device *in, const struct net_device *out,
- const struct ebt_ulog_info *uloginfo, const char *prefix)
+static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct ebt_ulog_info *uloginfo,
+ const char *prefix)
{
ebt_ulog_packet_msg_t *pm;
size_t size, copy_len;
struct nlmsghdr *nlh;
- struct net *net = dev_net(in ? in : out);
struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
unsigned int group = uloginfo->nlgroup;
ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
@@ -233,7 +235,7 @@ unlock:
}
/* this function is registered with the netfilter core */
-static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
+static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const struct nf_loginfo *li,
const char *prefix)
@@ -252,13 +254,15 @@ static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
}
- ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+ ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
}
static unsigned int
ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
- ebt_ulog_packet(par->hooknum, skb, par->in, par->out,
+ struct net *net = dev_net(par->in ? par->in : par->out);
+
+ ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
par->targinfo, NULL);
return EBT_CONTINUE;
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index a3395fdfbd4..3a246a6cab4 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1204,6 +1204,7 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
mutex_lock(&osdc->request_mutex);
if (req->r_linger) {
__unregister_linger_request(osdc, req);
+ req->r_linger = 0;
ceph_osdc_put_request(req);
}
mutex_unlock(&osdc->request_mutex);
@@ -1674,13 +1675,13 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
__register_request(osdc, req);
__unregister_linger_request(osdc, req);
}
+ reset_changed_osds(osdc);
mutex_unlock(&osdc->request_mutex);
if (needmap) {
dout("%d requests for down osds, need new map\n", needmap);
ceph_monc_request_next_osdmap(&osdc->client->monc);
}
- reset_changed_osds(osdc);
}
@@ -2120,7 +2121,9 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex);
__register_request(osdc, req);
- WARN_ON(req->r_sent);
+ req->r_sent = 0;
+ req->r_got_reply = 0;
+ req->r_completed = 0;
rc = __map_request(osdc, req, 0);
if (rc < 0) {
if (nofail) {
diff --git a/net/compat.c b/net/compat.c
index 79ae8848500..f0a1ba6c808 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -734,19 +734,25 @@ static unsigned char nas[21] = {
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
{
- return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags)
{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
{
- return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
@@ -768,6 +774,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
int datagrams;
struct timespec ktspec;
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
if (COMPAT_USE_64BIT_TIME)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT,
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c013f38482a..6cda4e2c213 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -39,6 +39,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
ha->refcount = 1;
ha->global_use = global;
ha->synced = sync;
+ ha->sync_cnt = 0;
list_add_tail_rcu(&ha->list, &list->list);
list->count++;
@@ -66,7 +67,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
}
if (sync) {
if (ha->synced)
- return 0;
+ return -EEXIST;
else
ha->synced = true;
}
@@ -139,10 +140,13 @@ static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
false, true);
- if (err)
+ if (err && err != -EEXIST)
return err;
- ha->sync_cnt++;
- ha->refcount++;
+
+ if (!err) {
+ ha->sync_cnt++;
+ ha->refcount++;
+ }
return 0;
}
@@ -159,7 +163,8 @@ static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
if (err)
return;
ha->sync_cnt--;
- __hw_addr_del_entry(from_list, ha, false, true);
+ /* address on from list is not marked synced */
+ __hw_addr_del_entry(from_list, ha, false, false);
}
static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
@@ -796,7 +801,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
return -EINVAL;
netif_addr_lock_nested(to);
- err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+ err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
netif_addr_unlock(to);
diff --git a/net/core/filter.c b/net/core/filter.c
index dad2a178f9f..6438f29ff26 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -778,7 +778,7 @@ int sk_detach_filter(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
-static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
+void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
static const u16 decodes[] = {
[BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 7e7aeb01de4..de178e46268 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -75,31 +75,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
/*
* Copy kernel to iovec. Returns -EFAULT on error.
- *
- * Note: this modifies the original iovec.
- */
-
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
-{
- while (len > 0) {
- if (iov->iov_len) {
- int copy = min_t(unsigned int, iov->iov_len, len);
- if (copy_to_user(iov->iov_base, kdata, copy))
- return -EFAULT;
- kdata += copy;
- len -= copy;
- iov->iov_len -= copy;
- iov->iov_base += copy;
- }
- iov++;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovec);
-
-/*
- * Copy kernel to iovec. Returns -EFAULT on error.
*/
int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
@@ -125,31 +100,6 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
EXPORT_SYMBOL(memcpy_toiovecend);
/*
- * Copy iovec to kernel. Returns -EFAULT on error.
- *
- * Note: this modifies the original iovec.
- */
-
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
-{
- while (len > 0) {
- if (iov->iov_len) {
- int copy = min_t(unsigned int, len, iov->iov_len);
- if (copy_from_user(kdata, iov->iov_base, copy))
- return -EFAULT;
- len -= copy;
- kdata += copy;
- iov->iov_base += copy;
- iov->iov_len -= copy;
- }
- iov++;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovec);
-
-/*
* Copy iovec from kernel. Returns -EFAULT on error.
*/
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index af9185d0be6..cfd777bd6bd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -195,7 +195,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->data = NULL;
+ skb->head = NULL;
skb->truesize = sizeof(struct sk_buff);
atomic_set(&skb->users, 1);
@@ -611,7 +611,7 @@ static void skb_release_head_state(struct sk_buff *skb)
static void skb_release_all(struct sk_buff *skb)
{
skb_release_head_state(skb);
- if (likely(skb->data))
+ if (likely(skb->head))
skb_release_data(skb);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index d4f4cea726e..88868a9d21d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -210,7 +210,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
- "sk_lock-AF_NFC" , "sk_lock-AF_MAX"
+ "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -226,7 +226,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
- "slock-AF_NFC" , "slock-AF_MAX"
+ "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -242,7 +242,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
- "clock-AF_NFC" , "clock-AF_MAX"
+ "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
};
/*
@@ -1217,18 +1217,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
#endif
}
-/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
- * un-modified. Special care is taken when initializing object to zero.
- */
-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
-{
- if (offsetof(struct sock, sk_node.next) != 0)
- memset(sk, 0, offsetof(struct sock, sk_node.next));
- memset(&sk->sk_node.pprev, 0,
- size - offsetof(struct sock, sk_node.pprev));
-}
-
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
unsigned long nulls1, nulls2;
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index d5bef0b0f63..a0e9cf6379d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -73,8 +73,13 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
goto out;
}
- if (filter)
- memcpy(nla_data(attr), filter->insns, len);
+ if (filter) {
+ struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
+ int i;
+
+ for (i = 0; i < filter->len; i++, fb++)
+ sk_decode_filter(&filter->insns[i], fb);
+ }
out:
rcu_read_unlock();
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index c625e4dad4b..2a83591492d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -235,7 +235,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
*/
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn;
- const struct iphdr *iph = (const struct iphdr *)skb->data;
+ const struct iphdr *iph;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
@@ -281,6 +281,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
else
itn = net_generic(net, ipgre_net_id);
+ iph = (const struct iphdr *)skb->data;
t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags,
iph->daddr, iph->saddr, tpi.key);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 147abf5275a..4bcabf3ab4c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -84,7 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);
/* Generate a checksum for an outgoing IP datagram. */
-__inline__ void ip_send_check(struct iphdr *iph)
+void ip_send_check(struct iphdr *iph)
{
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e4147ec1665..7fa8f08fa7a 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -503,6 +503,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
dst = tnl_params->daddr;
if (dst == 0) {
/* NBMA tunnel */
@@ -658,7 +659,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
/* Push down and install the IP header. */
skb_push(skb, sizeof(struct iphdr));
@@ -853,7 +853,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
-int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname)
{
struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
@@ -899,7 +899,7 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
unregister_netdevice_queue(itn->fb_tunnel_dev, head);
}
-void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn)
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
{
LIST_HEAD(list);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 9d2bdb2c1d3..c118f6b576b 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -361,8 +361,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
tunnel->err_count = 0;
}
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
- IPSKB_REROUTED);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
nf_reset(skb);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index f8a222cb644..ff4b781b105 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -162,7 +162,8 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
return skb;
}
-static void ipt_ulog_packet(unsigned int hooknum,
+static void ipt_ulog_packet(struct net *net,
+ unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -174,7 +175,6 @@ static void ipt_ulog_packet(unsigned int hooknum,
size_t size, copy_len;
struct nlmsghdr *nlh;
struct timeval tv;
- struct net *net = dev_net(in ? in : out);
struct ulog_net *ulog = ulog_pernet(net);
/* ffs == find first bit set, necessary because userspace
@@ -231,8 +231,10 @@ static void ipt_ulog_packet(unsigned int hooknum,
put_unaligned(tv.tv_usec, &pm->timestamp_usec);
put_unaligned(skb->mark, &pm->mark);
pm->hook = hooknum;
- if (prefix != NULL)
- strncpy(pm->prefix, prefix, sizeof(pm->prefix));
+ if (prefix != NULL) {
+ strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
+ pm->prefix[sizeof(pm->prefix) - 1] = '\0';
+ }
else if (loginfo->prefix[0] != '\0')
strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
else
@@ -291,12 +293,15 @@ alloc_failure:
static unsigned int
ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
- ipt_ulog_packet(par->hooknum, skb, par->in, par->out,
+ struct net *net = dev_net(par->in ? par->in : par->out);
+
+ ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
par->targinfo, NULL);
return XT_CONTINUE;
}
-static void ipt_logfn(u_int8_t pf,
+static void ipt_logfn(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
@@ -318,7 +323,7 @@ static void ipt_logfn(u_int8_t pf,
strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
}
- ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+ ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
}
static int ulog_tg_check(const struct xt_tgchk_param *par)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 550781a17b3..d35bbf0cf40 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -737,10 +737,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
{
struct rtable *rt;
struct flowi4 fl4;
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ int oif = skb->dev->ifindex;
+ u8 tos = RT_TOS(iph->tos);
+ u8 prot = iph->protocol;
+ u32 mark = skb->mark;
rt = (struct rtable *) dst;
- ip_rt_build_flow_key(&fl4, sk, skb);
+ __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
__ip_do_redirect(rt, skb, &fl4, true);
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dcb116dde21..ab450c099aa 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2887,6 +2887,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
unsigned int mss;
struct sk_buff *gso_skb = skb;
__sum16 newcheck;
+ bool ooo_okay, copy_destructor;
if (!pskb_may_pull(skb, sizeof(*th)))
goto out;
@@ -2927,10 +2928,18 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
goto out;
}
+ copy_destructor = gso_skb->destructor == tcp_wfree;
+ ooo_okay = gso_skb->ooo_okay;
+ /* All segments but the first should have ooo_okay cleared */
+ skb->ooo_okay = 0;
+
segs = skb_segment(skb, features);
if (IS_ERR(segs))
goto out;
+ /* Only first segment might have ooo_okay set */
+ segs->ooo_okay = ooo_okay;
+
delta = htonl(oldlen + (thlen + mss));
skb = segs;
@@ -2950,6 +2959,17 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
thlen, skb->csum));
seq += mss;
+ if (copy_destructor) {
+ skb->destructor = gso_skb->destructor;
+ skb->sk = gso_skb->sk;
+ /* {tcp|sock}_wfree() use exact truesize accounting :
+ * sum(skb->truesize) MUST be exactly be gso_skb->truesize
+ * So we account mss bytes of 'true size' for each segment.
+ * The last segment will contain the remaining.
+ */
+ skb->truesize = mss;
+ gso_skb->truesize -= mss;
+ }
skb = skb->next;
th = tcp_hdr(skb);
@@ -2962,7 +2982,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
* is freed at TX completion, and not right now when gso_skb
* is freed by GSO engine
*/
- if (gso_skb->destructor == tcp_wfree) {
+ if (copy_destructor) {
swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor);
swap(gso_skb->truesize, skb->truesize);
@@ -3269,8 +3289,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
for (i = 0; i < shi->nr_frags; ++i) {
const struct skb_frag_struct *f = &shi->frags[i];
- struct page *page = skb_frag_page(f);
- sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+ unsigned int offset = f->page_offset;
+ struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
+
+ sg_set_page(&sg, page, skb_frag_size(f),
+ offset_in_page(offset));
if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
return 1;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 08bbe609652..9c6225780bd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2743,8 +2743,8 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
- int prior_sacked, bool is_dupack,
- int flag)
+ int prior_sacked, int prior_packets,
+ bool is_dupack, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -2804,7 +2804,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tcp_add_reno_sack(sk);
} else
do_lost = tcp_try_undo_partial(sk, pkts_acked);
- newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
+ newly_acked_sacked = prior_packets - tp->packets_out +
+ tp->sacked_out - prior_sacked;
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, is_dupack);
@@ -2818,7 +2819,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (is_dupack)
tcp_add_reno_sack(sk);
}
- newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
+ newly_acked_sacked = prior_packets - tp->packets_out +
+ tp->sacked_out - prior_sacked;
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
@@ -3330,9 +3332,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
bool is_dupack = false;
u32 prior_in_flight;
u32 prior_fackets;
- int prior_packets;
+ int prior_packets = tp->packets_out;
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
+ int previous_packets_out = 0;
/* If the ack is older than previous acks
* then we can probably ignore it.
@@ -3403,14 +3406,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
sk->sk_err_soft = 0;
icsk->icsk_probes_out = 0;
tp->rcv_tstamp = tcp_time_stamp;
- prior_packets = tp->packets_out;
if (!prior_packets)
goto no_queue;
/* See if we can take anything off of the retransmit queue. */
+ previous_packets_out = tp->packets_out;
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
- pkts_acked = prior_packets - tp->packets_out;
+ pkts_acked = previous_packets_out - tp->packets_out;
if (tcp_ack_is_dubious(sk, flag)) {
/* Advance CWND, if state allows this. */
@@ -3418,7 +3421,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
} else {
if (flag & FLAG_DATA_ACKED)
tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3441,7 +3444,7 @@ no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission.
@@ -3464,7 +3467,7 @@ old_ack:
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
}
SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 536d40929ba..ec335fabd5c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -874,11 +874,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
- if (tcp_packets_in_flight(tp) == 0) {
+ if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
- skb->ooo_okay = 1;
- } else
- skb->ooo_okay = 0;
+
+ /* if no packet is in qdisc/device queue, then allow XPS to select
+ * another queue.
+ */
+ skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d1ab6ab29a5..1bbf744c2cc 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1487,7 +1487,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
}
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
- struct net_device *dev, int strict)
+ const struct net_device *dev, int strict)
{
struct inet6_ifaddr *ifp;
unsigned int hash = inet6_addr_hash(addr);
@@ -2658,8 +2658,10 @@ static void init_loopback(struct net_device *dev)
sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
/* Failure cases are ignored */
- if (!IS_ERR(sp_rt))
+ if (!IS_ERR(sp_rt)) {
+ sp_ifa->rt = sp_rt;
ip6_ins_rt(sp_rt);
+ }
}
read_unlock_bh(&idev->lock);
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index d3ddd840035..ecd60733e5e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1081,6 +1081,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
}
if (t == NULL)
t = netdev_priv(dev);
+ memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
@@ -1128,6 +1129,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
if (t) {
err = 0;
+ memset(&p, 0, sizeof(p));
ip6gre_tnl_parm_to_user(&p, &t->parms);
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d2eedf19233..dae1949019d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1147,7 +1147,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
if (WARN_ON(np->cork.opt))
return -EINVAL;
- np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
+ np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
if (unlikely(np->cork.opt == NULL))
return -ENOBUFS;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 72836f40b73..95f3f1da0d7 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -10,6 +10,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
+#include <net/addrconf.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
@@ -186,6 +187,10 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
return csum;
};
+static const struct nf_ipv6_ops ipv6ops = {
+ .chk_addr = ipv6_chk_addr,
+};
+
static const struct nf_afinfo nf_ip6_afinfo = {
.family = AF_INET6,
.checksum = nf_ip6_checksum,
@@ -198,6 +203,7 @@ static const struct nf_afinfo nf_ip6_afinfo = {
int __init ipv6_netfilter_init(void)
{
+ RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
return nf_register_afinfo(&nf_ip6_afinfo);
}
@@ -206,5 +212,6 @@ int __init ipv6_netfilter_init(void)
*/
void ipv6_netfilter_fini(void)
{
+ RCU_INIT_POINTER(nf_ipv6_ops, NULL);
nf_unregister_afinfo(&nf_ip6_afinfo);
}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index f3c1ff4357f..51c3285b5d9 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -90,7 +90,7 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
- SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+ /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 71167069b39..0a17ed9eaf3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1890,6 +1890,17 @@ void tcp6_proc_exit(struct net *net)
}
#endif
+static void tcp_v6_clear_sk(struct sock *sk, int size)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ /* we do not want to clear pinet6 field, because of RCU lookups */
+ sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+ memset(&inet->pinet6 + 1, 0, size);
+}
+
struct proto tcpv6_prot = {
.name = "TCPv6",
.owner = THIS_MODULE,
@@ -1933,6 +1944,7 @@ struct proto tcpv6_prot = {
#ifdef CONFIG_MEMCG_KMEM
.proto_cgroup = tcp_proto_cgroup,
#endif
+ .clear_sk = tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d4defdd4493..42923b14dfa 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1432,6 +1432,17 @@ void udp6_proc_exit(struct net *net) {
}
#endif /* CONFIG_PROC_FS */
+void udp_v6_clear_sk(struct sock *sk, int size)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ /* we do not want to clear pinet6 field, because of RCU lookups */
+ sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+ memset(&inet->pinet6 + 1, 0, size);
+}
+
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
@@ -1462,7 +1473,7 @@ struct proto udpv6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
- .clear_sk = sk_prot_clear_portaddr_nulls,
+ .clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udpv6_protosw = {
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index d7571046bfc..4691ed50a92 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
extern void udpv6_destroy_sock(struct sock *sk);
+extern void udp_v6_clear_sk(struct sock *sk, int size);
+
#ifdef CONFIG_PROC_FS
extern int udp6_seq_show(struct seq_file *seq, void *v);
#endif
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 3bb3a891a42..d3cfaf9c7a0 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -46,11 +46,12 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
- u8 *mac_start, *prevhdr;
+ u8 *packet_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum;
+ int tnl_hlen;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
@@ -83,9 +84,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
- if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
- pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
- goto out;
+ tnl_hlen = skb_tnl_header_len(skb);
+ if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
@@ -93,11 +96,12 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
- unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
- unfrag_ip6hlen;
- mac_start = skb_mac_header(skb);
- memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
+ unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+ unfrag_ip6hlen + tnl_hlen;
+ packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+ memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
+ SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 1d08e21d9f6..dfcc4be4689 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
- .clear_sk = sk_prot_clear_portaddr_nulls,
+ .clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udplite6_protosw = {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4ef7bdb6544..23ed03d786c 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -103,8 +103,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
dev_hold(dev);
xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
- if (!xdst->u.rt6.rt6i_idev)
+ if (!xdst->u.rt6.rt6i_idev) {
+ dev_put(dev);
return -ENODEV;
+ }
rt6_transfer_peer(&xdst->u.rt6, rt);
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 8c004161a84..9ea0c933b9f 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -544,7 +544,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
/*
* We now have some discovery info to deliver!
*/
- discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
+ discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
if (!discovery) {
IRDA_WARNING("%s: unable to malloc!\n", __func__);
return;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5b1e5af2571..c5fbd758968 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2366,6 +2366,8 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
out:
xfrm_pol_put(xp);
+ if (err == 0)
+ xfrm_garbage_collect(net);
return err;
}
@@ -2615,6 +2617,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
out:
xfrm_pol_put(xp);
+ if (delete && err == 0)
+ xfrm_garbage_collect(net);
return err;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 637a341c1e2..8dec6876dc5 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -346,19 +346,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
skb_put(skb, 2);
/* Copy user data into skb */
- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
+ error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
+ total_len);
if (error < 0) {
kfree_skb(skb);
goto error_put_sess_tun;
}
- skb_put(skb, total_len);
l2tp_xmit_skb(session, skb, session->hdr_len);
sock_put(ps->tunnel_sock);
sock_put(sk);
- return error;
+ return total_len;
error_put_sess_tun:
sock_put(ps->tunnel_sock);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 07c865a31a3..857ca9f3517 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -30,6 +30,8 @@ static DEFINE_MUTEX(afinfo_mutex);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
+const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ipv6_ops);
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 085b5880ab0..05565d2b3a6 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1001,6 +1001,32 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
return th->rst;
}
+static inline bool is_new_conn(const struct sk_buff *skb,
+ struct ip_vs_iphdr *iph)
+{
+ switch (iph->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr _tcph, *th;
+
+ th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ return false;
+ return th->syn;
+ }
+ case IPPROTO_SCTP: {
+ sctp_chunkhdr_t *sch, schunk;
+
+ sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
+ sizeof(schunk), &schunk);
+ if (sch == NULL)
+ return false;
+ return sch->type == SCTP_CID_INIT;
+ }
+ default:
+ return false;
+ }
+}
+
/* Handle response packets: rewrite addresses and send away...
*/
static unsigned int
@@ -1612,6 +1638,15 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
* Check if the packet belongs to an existing connection entry
*/
cp = pp->conn_in_get(af, skb, &iph, 0);
+
+ if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
+ unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
+ is_new_conn(skb, &iph)) {
+ ip_vs_conn_expire_now(cp);
+ __ip_vs_conn_put(cp);
+ cp = NULL;
+ }
+
if (unlikely(!cp) && !iph.fragoffs) {
/* No (second) fragments need to enter here, as nf_defrag_ipv6
* replayed fragment zero will already have created the cp
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5b142fb1648..9e6c2a075a4 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2542,6 +2542,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
struct ip_vs_dest *dest;
struct ip_vs_dest_entry entry;
+ memset(&entry, 0, sizeof(entry));
list_for_each_entry(dest, &svc->destinations, n_list) {
if (count >= get->num_dests)
break;
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 0df269d7c99..a65edfe4b16 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -67,8 +67,8 @@ struct ip_vs_sh_bucket {
#define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1)
struct ip_vs_sh_state {
- struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
struct rcu_head rcu_head;
+ struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
};
/*
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 388656d5a9e..3b18dd1be7d 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -148,7 +148,7 @@ void nf_log_packet(struct net *net,
va_start(args, fmt);
vsnprintf(prefix, sizeof(prefix), fmt, args);
va_end(args);
- logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix);
+ logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
}
rcu_read_unlock();
}
@@ -368,17 +368,20 @@ static int __net_init nf_log_net_init(struct net *net)
return 0;
out_sysctl:
+#ifdef CONFIG_PROC_FS
/* For init_net: errors will trigger panic, don't unroll on error. */
if (!net_eq(net, &init_net))
remove_proc_entry("nf_log", net->nf.proc_netfilter);
-
+#endif
return ret;
}
static void __net_exit nf_log_net_exit(struct net *net)
{
netfilter_log_sysctl_exit(net);
+#ifdef CONFIG_PROC_FS
remove_proc_entry("nf_log", net->nf.proc_netfilter);
+#endif
}
static struct pernet_operations nf_log_net_ops = {
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index dc3fd5d4446..c7b6d466a66 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -149,9 +149,12 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
- if (last && cur != last)
- continue;
+ if (last) {
+ if (cur != last)
+ continue;
+ last = NULL;
+ }
if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
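The old test skipped every entry other than the resume cursor for the remainder of the dump; clearing last once the cursor entry is reached lets the dump continue past it. A standalone sketch of the corrected resume pattern (illustrative names only):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *entries[] = { "a", "b", "c", "d" };
    const char *last = "b";            /* entry the previous dump pass stopped at */
    size_t i;

    for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
        if (last) {
            if (strcmp(entries[i], last) != 0)
                continue;              /* still before the resume cursor */
            last = NULL;               /* cursor found: emit it and everything after */
        }
        printf("dump %s\n", entries[i]);   /* prints b, c, d */
    }
    return 0;
}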
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 701c88a20fe..65074dfb938 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -220,9 +220,12 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
list_for_each_entry_rcu(cur, &cttimeout_list, head) {
- if (last && cur != last)
- continue;
+ if (last) {
+ if (cur != last)
+ continue;
+ last = NULL;
+ }
if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index faf1e9300d8..962e9792e31 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -602,7 +602,8 @@ static struct nf_loginfo default_loginfo = {
/* log handler for internal netfilter logging api */
void
-nfulnl_log_packet(u_int8_t pf,
+nfulnl_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
@@ -615,7 +616,6 @@ nfulnl_log_packet(u_int8_t pf,
const struct nf_loginfo *li;
unsigned int qthreshold;
unsigned int plen;
- struct net *net = dev_net(in ? in : out);
struct nfnl_log_net *log = nfnl_log_pernet(net);
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
@@ -1045,7 +1045,9 @@ static int __net_init nfnl_log_net_init(struct net *net)
static void __net_exit nfnl_log_net_exit(struct net *net)
{
+#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
+#endif
}
static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 2e0e835baf7..5352b2d2d5b 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -637,9 +637,6 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
if (queue->copy_mode == NFQNL_COPY_NONE)
return -EINVAL;
- if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb))
- return __nfqnl_enqueue_packet(net, queue, entry);
-
skb = entry->skb;
switch (entry->pf) {
@@ -651,6 +648,9 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
break;
}
+ if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
+ return __nfqnl_enqueue_packet(net, queue, entry);
+
nf_bridge_adjust_skb_data(skb);
segs = skb_gso_segment(skb, 0);
/* Does not use PTR_ERR to limit the number of error codes that can be
@@ -1285,7 +1285,9 @@ static int __net_init nfnl_queue_net_init(struct net *net)
static void __net_exit nfnl_queue_net_exit(struct net *net)
{
+#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
+#endif
}
static struct pernet_operations nfnl_queue_net_ops = {
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index fe573f6c9e9..5ab24843370 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -466,7 +466,8 @@ log_packet_common(struct sbuff *m,
static void
-ipt_log_packet(u_int8_t pf,
+ipt_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
@@ -475,7 +476,6 @@ ipt_log_packet(u_int8_t pf,
const char *prefix)
{
struct sbuff *m;
- struct net *net = dev_net(in ? in : out);
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
@@ -737,7 +737,7 @@ static void dump_ipv6_packet(struct sbuff *m,
dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
- if (!recurse && skb->mark)
+ if (recurse && skb->mark)
sb_add(m, "MARK=0x%x ", skb->mark);
}
@@ -797,7 +797,8 @@ fallback:
}
static void
-ip6t_log_packet(u_int8_t pf,
+ip6t_log_packet(struct net *net,
+ u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
@@ -806,7 +807,6 @@ ip6t_log_packet(u_int8_t pf,
const char *prefix)
{
struct sbuff *m;
- struct net *net = dev_net(in ? in : out);
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
@@ -833,17 +833,18 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_log_info *loginfo = par->targinfo;
struct nf_loginfo li;
+ struct net *net = dev_net(par->in ? par->in : par->out);
li.type = NF_LOG_TYPE_LOG;
li.u.log.level = loginfo->level;
li.u.log.logflags = loginfo->logflags;
if (par->family == NFPROTO_IPV4)
- ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
+ ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
- ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
+ ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#endif
else
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index a17dd0f589b..fb7497c928a 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -26,13 +26,14 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
struct nf_loginfo li;
+ struct net *net = dev_net(par->in ? par->in : par->out);
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
- nfulnl_log_packet(par->family, par->hooknum, skb, par->in,
+ nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in,
par->out, &li, info->prefix);
return XT_CONTINUE;
}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index a75240f0d42..afaebc76693 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -125,6 +125,12 @@ tcpmss_mangle_packet(struct sk_buff *skb,
skb_put(skb, TCPOLEN_MSS);
+ /* RFC 879 states that the default MSS is 536 without specific
+ * knowledge that the destination host is prepared to accept larger.
+ * Since no MSS was provided, we MUST NOT set a value > 536.
+ */
+ newmss = min(newmss, (u16)536);
+
opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
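A tiny self-contained example of the clamp introduced above: per RFC 879 a sender may only assume an MSS of 536 when the peer advertised none, so the option being synthesized must not exceed that value.

#include <stdio.h>

/* Minimal illustration of the newmss clamp: when a SYN carries no MSS
 * option, the synthesized option is capped at the RFC 879 default. */
static unsigned short clamp_default_mss(unsigned short newmss)
{
    return newmss > 536 ? 536 : newmss;
}

int main(void)
{
    printf("%u\n", clamp_default_mss(1460));  /* prints 536 */
    printf("%u\n", clamp_default_mss(512));   /* prints 512 */
    return 0;
}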
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 25fd1c4e1ee..1eb1a44bfd3 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -30,17 +30,28 @@ static inline unsigned int optlen(const u_int8_t *opt, unsigned int offset)
static unsigned int
tcpoptstrip_mangle_packet(struct sk_buff *skb,
- const struct xt_tcpoptstrip_target_info *info,
+ const struct xt_action_param *par,
unsigned int tcphoff, unsigned int minlen)
{
+ const struct xt_tcpoptstrip_target_info *info = par->targinfo;
unsigned int optl, i, j;
struct tcphdr *tcph;
u_int16_t n, o;
u_int8_t *opt;
+ int len;
+
+ /* This is a fragment, no TCP header is available */
+ if (par->fragoff != 0)
+ return XT_CONTINUE;
if (!skb_make_writable(skb, skb->len))
return NF_DROP;
+ len = skb->len - tcphoff;
+ if (len < (int)sizeof(struct tcphdr) ||
+ tcp_hdr(skb)->doff * 4 > len)
+ return NF_DROP;
+
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
opt = (u_int8_t *)tcph;
@@ -76,7 +87,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
static unsigned int
tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
- return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb),
+ return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb),
sizeof(struct iphdr) + sizeof(struct tcphdr));
}
@@ -94,7 +105,7 @@ tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
if (tcphoff < 0)
return NF_DROP;
- return tcpoptstrip_mangle_packet(skb, par->targinfo, tcphoff,
+ return tcpoptstrip_mangle_packet(skb, par, tcphoff,
sizeof(*ipv6h) + sizeof(struct tcphdr));
}
#endif
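The two checks added to tcpoptstrip_mangle_packet() guard against truncated packets and a doff field that points past the end of the data. Restated on its own (a hedged sketch, with 20 standing in for sizeof(struct tcphdr)):

#include <stdbool.h>
#include <stdio.h>

/* The packet must hold at least a minimal TCP header, and the header
 * length advertised in doff (32-bit words) must not run past the end. */
static bool tcp_header_sane(int len_after_tcphoff, unsigned int doff)
{
    if (len_after_tcphoff < 20)            /* sizeof(struct tcphdr) */
        return false;
    return doff * 4 <= (unsigned int)len_after_tcphoff;
}

int main(void)
{
    printf("%d\n", tcp_header_sane(40, 10)); /* 1: 40-byte header fits */
    printf("%d\n", tcp_header_sane(20, 8));  /* 0: doff claims 32 bytes */
    return 0;
}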
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 49c5ff7f6dd..68ff29f6086 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -22,6 +22,7 @@
#include <net/ip6_fib.h>
#endif
+#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_addrtype.h>
#include <linux/netfilter/x_tables.h>
@@ -33,12 +34,12 @@ MODULE_ALIAS("ip6t_addrtype");
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
- const struct in6_addr *addr)
+ const struct in6_addr *addr, u16 mask)
{
const struct nf_afinfo *afinfo;
struct flowi6 flow;
struct rt6_info *rt;
- u32 ret;
+ u32 ret = 0;
int route_err;
memset(&flow, 0, sizeof(flow));
@@ -49,12 +50,19 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
rcu_read_lock();
afinfo = nf_get_afinfo(NFPROTO_IPV6);
- if (afinfo != NULL)
+ if (afinfo != NULL) {
+ const struct nf_ipv6_ops *v6ops;
+
+ if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
+ v6ops = nf_get_ipv6_ops();
+ if (v6ops && v6ops->chk_addr(net, addr, dev, true))
+ ret = XT_ADDRTYPE_LOCAL;
+ }
route_err = afinfo->route(net, (struct dst_entry **)&rt,
- flowi6_to_flowi(&flow), !!dev);
- else
+ flowi6_to_flowi(&flow), false);
+ } else {
route_err = 1;
-
+ }
rcu_read_unlock();
if (route_err)
@@ -62,15 +70,12 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
if (rt->rt6i_flags & RTF_REJECT)
ret = XT_ADDRTYPE_UNREACHABLE;
- else
- ret = 0;
- if (rt->rt6i_flags & RTF_LOCAL)
+ if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
ret |= XT_ADDRTYPE_LOCAL;
if (rt->rt6i_flags & RTF_ANYCAST)
ret |= XT_ADDRTYPE_ANYCAST;
-
dst_release(&rt->dst);
return ret;
}
@@ -90,7 +95,7 @@ static bool match_type6(struct net *net, const struct net_device *dev,
if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
XT_ADDRTYPE_UNREACHABLE) & mask)
- return !!(mask & match_lookup_rt6(net, dev, addr));
+ return !!(mask & match_lookup_rt6(net, dev, addr, mask));
return true;
}
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index d8d42433755..6bb1d42f0fa 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
}
}
+/**
+ * netlbl_domhsh_validate - Validate a new domain mapping entry
+ * @entry: the entry to validate
+ *
+ * This function validates the new domain mapping entry to ensure that it is
+ * a valid entry. Returns zero on success, negative values on failure.
+ *
+ */
+static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
+{
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
+
+ if (entry == NULL)
+ return -EINVAL;
+
+ switch (entry->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ if (entry->type_def.cipsov4 != NULL ||
+ entry->type_def.addrsel != NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ if (entry->type_def.cipsov4 == NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ switch (map4->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ if (map4->type_def.cipsov4 != NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ if (map4->type_def.cipsov4 == NULL)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ switch (map6->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+#endif /* IPv6 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* Domain Hash Table Functions
*/
@@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
struct netlbl_af6list *tmp6;
#endif /* IPv6 */
+ ret_val = netlbl_domhsh_validate(entry);
+ if (ret_val != 0)
+ return ret_val;
+
/* XXX - we can remove this RCU read lock as the spinlock protects the
* entire function, but before we do we need to fixup the
* netlbl_af[4,6]list RCU functions to do "the right thing" with
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 12ac6b47a35..57ee84d2147 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -371,7 +371,7 @@ static int netlink_mmap(struct file *file, struct socket *sock,
err = 0;
out:
mutex_unlock(&nlk->pg_vec_lock);
- return 0;
+ return err;
}
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
@@ -747,7 +747,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
atomic_dec(&ring->pending);
sock_put(sk);
- skb->data = NULL;
+ skb->head = NULL;
}
#endif
if (skb->sk != NULL)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8ec1bca7f85..20a1bd0e654 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2851,12 +2851,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET;
+ memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev)
- strncpy(uaddr->sa_data, dev->name, 14);
- else
- memset(uaddr->sa_data, 0, 14);
+ strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
rcu_read_unlock();
*uaddr_len = sizeof(*uaddr);
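Two things change above: sa_data is fully zeroed before use so no stack bytes reach userspace, and the copy switches to strlcpy(), which always NUL-terminates on truncation (strncpy() does not). A portable userspace equivalent of the same idea:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char dst[14];
    const char *ifname = "averylongifname0";  /* longer than dst */

    memset(dst, 0, sizeof(dst));              /* nothing uninitialized escapes */
    strncpy(dst, ifname, sizeof(dst) - 1);    /* bounded copy, NUL guaranteed by the memset */
    printf("%s\n", dst);
    return 0;
}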
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 823463adbd2..189e3c5b3d0 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -231,14 +231,14 @@ override:
}
if (R_tab) {
police->rate_present = true;
- psched_ratecfg_precompute(&police->rate, R_tab->rate.rate);
+ psched_ratecfg_precompute(&police->rate, &R_tab->rate);
qdisc_put_rtab(R_tab);
} else {
police->rate_present = false;
}
if (P_tab) {
police->peak_present = true;
- psched_ratecfg_precompute(&police->peak, P_tab->rate.rate);
+ psched_ratecfg_precompute(&police->peak, &P_tab->rate);
qdisc_put_rtab(P_tab);
} else {
police->peak_present = false;
@@ -376,9 +376,9 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
};
if (police->rate_present)
- opt.rate.rate = psched_ratecfg_getrate(&police->rate);
+ psched_ratecfg_getrate(&opt.rate, &police->rate);
if (police->peak_present)
- opt.peakrate.rate = psched_ratecfg_getrate(&police->peak);
+ psched_ratecfg_getrate(&opt.peakrate, &police->peak);
if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
goto nla_put_failure;
if (police->tcfp_result &&
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2b935e7cfe7..281c1bded1f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -291,17 +291,18 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
{
struct qdisc_rate_table *rtab;
+ if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+ nla_len(tab) != TC_RTAB_SIZE)
+ return NULL;
+
for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
- if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
+ if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
+ !memcmp(&rtab->data, nla_data(tab), 1024)) {
rtab->refcnt++;
return rtab;
}
}
- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
- nla_len(tab) != TC_RTAB_SIZE)
- return NULL;
-
rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
if (rtab) {
rtab->rate = *r;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index eac7e0ee23c..20224086cc2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -898,14 +898,16 @@ void dev_shutdown(struct net_device *dev)
WARN_ON(timer_pending(&dev->watchdog_timer));
}
-void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+ const struct tc_ratespec *conf)
{
u64 factor;
u64 mult;
int shift;
- r->rate_bps = (u64)rate << 3;
- r->shift = 0;
+ memset(r, 0, sizeof(*r));
+ r->overhead = conf->overhead;
+ r->rate_bps = (u64)conf->rate << 3;
r->mult = 1;
/*
* Calibrate mult, shift so that token counting is accurate
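The reworked psched_ratecfg_precompute() now takes the whole tc_ratespec, zeroes the result, and carries the per-packet overhead alongside the rate, which is converted from bytes per second to bits per second. A small sketch of that first step (illustrative types, not the kernel struct):

#include <stdio.h>
#include <stdint.h>

struct ratecfg {
    uint64_t rate_bps;   /* internal unit: bits per second */
    int overhead;        /* per-packet overhead from the ratespec */
};

static void precompute(struct ratecfg *r, uint32_t rate_bytes, int overhead)
{
    r->rate_bps = (uint64_t)rate_bytes << 3;   /* bytes/s -> bits/s */
    r->overhead = overhead;
}

int main(void)
{
    struct ratecfg r;

    precompute(&r, 125000, 24);                /* 125000 B/s == 1 Mbit/s */
    printf("%llu %d\n", (unsigned long long)r.rate_bps, r.overhead);
    return 0;
}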
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 79b1876b6cd..adaedd79389 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -109,7 +109,7 @@ struct htb_class {
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_node pq_node; /* node for event queue */
- psched_time_t pq_key;
+ s64 pq_key;
int prio_activity; /* for which prios are we active */
enum htb_cmode cmode; /* current mode of the class */
@@ -121,10 +121,10 @@ struct htb_class {
/* token bucket parameters */
struct psched_ratecfg rate;
struct psched_ratecfg ceil;
- s64 buffer, cbuffer; /* token bucket depth/rate */
- psched_tdiff_t mbuffer; /* max wait time */
- s64 tokens, ctokens; /* current number of tokens */
- psched_time_t t_c; /* checkpoint time */
+ s64 buffer, cbuffer; /* token bucket depth/rate */
+ s64 mbuffer; /* max wait time */
+ s64 tokens, ctokens; /* current number of tokens */
+ s64 t_c; /* checkpoint time */
};
struct htb_sched {
@@ -141,15 +141,15 @@ struct htb_sched {
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* time of nearest event per level (row) */
- psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
+ s64 near_ev_cache[TC_HTB_MAXDEPTH];
int defcls; /* class where unclassified flows go to */
/* filters for qdisc itself */
struct tcf_proto *filter_list;
- int rate2quantum; /* quant = rate / rate2quantum */
- psched_time_t now; /* cached dequeue time */
+ int rate2quantum; /* quant = rate / rate2quantum */
+ s64 now; /* cached dequeue time */
struct qdisc_watchdog watchdog;
/* non shaped skbs; let them go directly thru */
@@ -664,8 +664,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
* next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events which have cl->pq_key <= q->now.
*/
-static psched_time_t htb_do_events(struct htb_sched *q, int level,
- unsigned long start)
+static s64 htb_do_events(struct htb_sched *q, int level,
+ unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
* 1 to simplify things when jiffy is going to be incremented
@@ -857,7 +857,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
- psched_time_t next_event;
+ s64 next_event;
unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
@@ -880,7 +880,7 @@ ok:
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
- psched_time_t event;
+ s64 event;
if (q->now >= q->near_ev_cache[level]) {
event = htb_do_events(q, level, start_at);
@@ -1090,9 +1090,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
memset(&opt, 0, sizeof(opt));
- opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
+ psched_ratecfg_getrate(&opt.rate, &cl->rate);
opt.buffer = PSCHED_NS2TICKS(cl->buffer);
- opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
+ psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
opt.quantum = cl->quantum;
opt.prio = cl->prio;
@@ -1117,8 +1117,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
if (!cl->level && cl->un.leaf.q)
cl->qstats.qlen = cl->un.leaf.q->q.qlen;
- cl->xstats.tokens = cl->tokens;
- cl->xstats.ctokens = cl->ctokens;
+ cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
+ cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
@@ -1200,7 +1200,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
- parent->t_c = psched_get_time();
+ parent->t_c = ktime_to_ns(ktime_get());
parent->cmode = HTB_CAN_SEND;
}
@@ -1417,8 +1417,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* set class to be in HTB_CAN_SEND state */
cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
- cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
- cl->t_c = psched_get_time();
+ cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
+ cl->t_c = ktime_to_ns(ktime_get());
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
@@ -1459,8 +1459,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
cl->prio = TC_HTB_NUMPRIO - 1;
}
- psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
- psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
+ psched_ratecfg_precompute(&cl->rate, &hopt->rate);
+ psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
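As a quick illustration of the unit change running through the HTB hunks above: the scheduler's internal clock values become plain nanosecond s64 counters, so the one-minute "max wait" buffer is expressed as 60 * NSEC_PER_SEC rather than 60 * PSCHED_TICKS_PER_SEC.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
    int64_t mbuffer = 60LL * NSEC_PER_SEC;   /* one minute, in nanoseconds */

    printf("%lld\n", (long long)mbuffer);    /* 60000000000 */
    return 0;
}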
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index c8388f3c342..e478d316602 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -298,9 +298,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
q->tokens = q->buffer;
q->ptokens = q->mtu;
- psched_ratecfg_precompute(&q->rate, rtab->rate.rate);
+ psched_ratecfg_precompute(&q->rate, &rtab->rate);
if (ptab) {
- psched_ratecfg_precompute(&q->peak, ptab->rate.rate);
+ psched_ratecfg_precompute(&q->peak, &ptab->rate);
q->peak_present = true;
} else {
q->peak_present = false;
@@ -350,9 +350,9 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure;
opt.limit = q->limit;
- opt.rate.rate = psched_ratecfg_getrate(&q->rate);
+ psched_ratecfg_getrate(&opt.rate, &q->rate);
if (q->peak_present)
- opt.peakrate.rate = psched_ratecfg_getrate(&q->peak);
+ psched_ratecfg_getrate(&opt.peakrate, &q->peak);
else
memset(&opt.peakrate, 0, sizeof(opt.peakrate));
opt.mtu = PSCHED_NS2TICKS(q->mtu);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 32a4625fef7..be35e2dbcc9 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -206,6 +206,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
*/
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
+ memset(q, 0, sizeof(struct sctp_outq));
+
q->asoc = asoc;
INIT_LIST_HEAD(&q->out_chunk_list);
INIT_LIST_HEAD(&q->control_chunk_list);
@@ -213,11 +215,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
- q->fast_rtx = 0;
- q->outstanding_bytes = 0;
q->empty = 1;
- q->cork = 0;
- q->out_qlen = 0;
}
/* Free the outqueue structure and any related pending chunks.
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f631c5ff4db..6abb1caf983 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4003,6 +4003,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
/* Release our hold on the endpoint. */
sp = sctp_sk(sk);
+ /* This could happen during socket init, thus we bail out
+ * early, since the rest of the below is not setup either.
+ */
+ if (sp->ep == NULL)
+ return;
+
if (sp->do_auto_asconf) {
sp->do_auto_asconf = 0;
list_del(&sp->auto_asconf_list);
diff --git a/net/socket.c b/net/socket.c
index b416093997d..4ca1526db75 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1956,7 +1956,7 @@ struct used_address {
unsigned int name_len;
};
-static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags,
struct used_address *used_address)
{
@@ -2071,22 +2071,30 @@ out:
* BSD sendmsg interface
*/
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
int fput_needed, err;
struct msghdr msg_sys;
- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ struct socket *sock;
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
- err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+ err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
fput_light(sock->file, fput_needed);
out:
return err;
}
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_sendmsg(fd, msg, flags);
+}
+
/*
* Linux sendmmsg interface
*/
@@ -2117,15 +2125,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
while (datagrams < vlen) {
if (MSG_CMSG_COMPAT & flags) {
- err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags, &used_address);
+ err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
- err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags, &used_address);
+ err = ___sys_sendmsg(sock,
+ (struct msghdr __user *)entry,
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
@@ -2149,10 +2158,12 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags)
{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
return __sys_sendmmsg(fd, mmsg, vlen, flags);
}
-static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags, int nosec)
{
struct compat_msghdr __user *msg_compat =
@@ -2244,23 +2255,31 @@ out:
* BSD recvmsg interface
*/
-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
- unsigned int, flags)
+long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
int fput_needed, err;
struct msghdr msg_sys;
- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ struct socket *sock;
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
- err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+ err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
fput_light(sock->file, fput_needed);
out:
return err;
}
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+ unsigned int, flags)
+{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_recvmsg(fd, msg, flags);
+}
+
/*
* Linux recvmmsg interface
*/
@@ -2298,17 +2317,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
* No need to ask LSM for more than the first datagram.
*/
if (MSG_CMSG_COMPAT & flags) {
- err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags & ~MSG_WAITFORONE,
- datagrams);
+ err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+ &msg_sys, flags & ~MSG_WAITFORONE,
+ datagrams);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
- err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags & ~MSG_WAITFORONE,
- datagrams);
+ err = ___sys_recvmsg(sock,
+ (struct msghdr __user *)entry,
+ &msg_sys, flags & ~MSG_WAITFORONE,
+ datagrams);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
@@ -2375,6 +2395,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
int datagrams;
struct timespec timeout_sys;
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
if (!timeout)
return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
@@ -2412,7 +2435,7 @@ static const unsigned char nargs[21] = {
SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
{
- unsigned long a[6];
+ unsigned long a[AUDITSC_ARGS];
unsigned long a0, a1;
int err;
unsigned int len;
@@ -2428,7 +2451,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
if (copy_from_user(a, args, len))
return -EFAULT;
- audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+ err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+ if (err)
+ return err;
a0 = a[0];
a1 = a[1];
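MSG_CMSG_COMPAT is set internally when the 32-bit compat entry points call into the common sendmsg/recvmsg code, so the native syscalls above now refuse it coming from userspace. A minimal sketch of that guard (the flag value here is illustrative):

#include <errno.h>
#include <stdio.h>

#define MSG_CMSG_COMPAT 0x80000000U   /* kernel-internal flag, value shown for illustration */

/* Reject the compat-only flag before doing any work, as the new
 * SYSCALL_DEFINE3 wrappers do; otherwise fall through to the real handler. */
static long sys_sendmsg_guard(unsigned int flags)
{
    if (flags & MSG_CMSG_COMPAT)
        return -EINVAL;
    return 0;   /* would continue into __sys_sendmsg() */
}

int main(void)
{
    printf("%ld\n", sys_sendmsg_guard(MSG_CMSG_COMPAT));  /* -22 (EINVAL) */
    printf("%ld\n", sys_sendmsg_guard(0));                /* 0 */
    return 0;
}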
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 7da6b457f66..fc2f78d6a9b 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -52,6 +52,8 @@
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
+#include "../netns.h"
+
static const struct rpc_authops authgss_ops;
static const struct rpc_credops gss_credops;
@@ -85,8 +87,6 @@ struct gss_auth {
};
/* pipe_version >= 0 if and only if someone has a pipe open. */
-static int pipe_version = -1;
-static atomic_t pipe_users = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
@@ -266,24 +266,27 @@ struct gss_upcall_msg {
char databuf[UPCALL_BUF_LEN];
};
-static int get_pipe_version(void)
+static int get_pipe_version(struct net *net)
{
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int ret;
spin_lock(&pipe_version_lock);
- if (pipe_version >= 0) {
- atomic_inc(&pipe_users);
- ret = pipe_version;
+ if (sn->pipe_version >= 0) {
+ atomic_inc(&sn->pipe_users);
+ ret = sn->pipe_version;
} else
ret = -EAGAIN;
spin_unlock(&pipe_version_lock);
return ret;
}
-static void put_pipe_version(void)
+static void put_pipe_version(struct net *net)
{
- if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
- pipe_version = -1;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
+ sn->pipe_version = -1;
spin_unlock(&pipe_version_lock);
}
}
@@ -291,9 +294,10 @@ static void put_pipe_version(void)
static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
+ struct net *net = rpc_net_ns(gss_msg->auth->client);
if (!atomic_dec_and_test(&gss_msg->count))
return;
- put_pipe_version();
+ put_pipe_version(net);
BUG_ON(!list_empty(&gss_msg->list));
if (gss_msg->ctx != NULL)
gss_put_ctx(gss_msg->ctx);
@@ -439,7 +443,10 @@ static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
struct rpc_clnt *clnt,
const char *service_name)
{
- if (pipe_version == 0)
+ struct net *net = rpc_net_ns(clnt);
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ if (sn->pipe_version == 0)
gss_encode_v0_msg(gss_msg);
else /* pipe_version == 1 */
gss_encode_v1_msg(gss_msg, clnt, service_name);
@@ -455,7 +462,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
if (gss_msg == NULL)
return ERR_PTR(-ENOMEM);
- vers = get_pipe_version();
+ vers = get_pipe_version(rpc_net_ns(clnt));
if (vers < 0) {
kfree(gss_msg);
return ERR_PTR(vers);
@@ -559,24 +566,34 @@ out:
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
+ struct net *net = rpc_net_ns(gss_auth->client);
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
struct rpc_pipe *pipe;
struct rpc_cred *cred = &gss_cred->gc_base;
struct gss_upcall_msg *gss_msg;
+ unsigned long timeout;
DEFINE_WAIT(wait);
- int err = 0;
+ int err;
dprintk("RPC: %s for uid %u\n",
__func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
+ err = 0;
+ /* Default timeout is 15s unless we know that gssd is not running */
+ timeout = 15 * HZ;
+ if (!sn->gssd_running)
+ timeout = HZ >> 2;
gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
err = wait_event_interruptible_timeout(pipe_version_waitqueue,
- pipe_version >= 0, 15*HZ);
- if (pipe_version < 0) {
+ sn->pipe_version >= 0, timeout);
+ if (sn->pipe_version < 0) {
+ if (err == 0)
+ sn->gssd_running = 0;
warn_gssd();
err = -EACCES;
}
- if (err)
+ if (err < 0)
goto out;
goto retry;
}
@@ -707,20 +724,22 @@ out:
static int gss_pipe_open(struct inode *inode, int new_version)
{
+ struct net *net = inode->i_sb->s_fs_info;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int ret = 0;
spin_lock(&pipe_version_lock);
- if (pipe_version < 0) {
+ if (sn->pipe_version < 0) {
/* First open of any gss pipe determines the version: */
- pipe_version = new_version;
+ sn->pipe_version = new_version;
rpc_wake_up(&pipe_version_rpc_waitqueue);
wake_up(&pipe_version_waitqueue);
- } else if (pipe_version != new_version) {
+ } else if (sn->pipe_version != new_version) {
/* Trying to open a pipe of a different version */
ret = -EBUSY;
goto out;
}
- atomic_inc(&pipe_users);
+ atomic_inc(&sn->pipe_users);
out:
spin_unlock(&pipe_version_lock);
return ret;
@@ -740,6 +759,7 @@ static int gss_pipe_open_v1(struct inode *inode)
static void
gss_pipe_release(struct inode *inode)
{
+ struct net *net = inode->i_sb->s_fs_info;
struct rpc_pipe *pipe = RPC_I(inode)->pipe;
struct gss_upcall_msg *gss_msg;
@@ -758,7 +778,7 @@ restart:
}
spin_unlock(&pipe->lock);
- put_pipe_version();
+ put_pipe_version(net);
}
static void
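One behavioural detail worth noting from the auth_gss hunks above: the upcall now waits the full 15 seconds only while a gssd daemon is believed to be running (gssd_running, set when the pipe is opened); otherwise it gives up after HZ >> 2 jiffies. A small sketch of that timeout selection (the HZ value is illustrative):

#include <stdio.h>

#define HZ 100   /* jiffies per second, illustrative */

static unsigned long upcall_timeout(int gssd_running)
{
    return gssd_running ? 15 * HZ : HZ >> 2;   /* 15 s vs. a quarter second */
}

int main(void)
{
    printf("%lu\n", upcall_timeout(1));  /* 1500 jiffies */
    printf("%lu\n", upcall_timeout(0));  /* 25 jiffies */
    return 0;
}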
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 5c4c61d527e..357f613df7f 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -21,16 +21,6 @@
#include <linux/sunrpc/svcauth.h>
#include "gss_rpc_xdr.h"
-static bool gssx_check_pointer(struct xdr_stream *xdr)
-{
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 4);
- if (unlikely(p == NULL))
- return -ENOSPC;
- return *p?true:false;
-}
-
static int gssx_enc_bool(struct xdr_stream *xdr, int v)
{
__be32 *p;
@@ -264,25 +254,27 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
if (unlikely(p == NULL))
return -ENOSPC;
count = be32_to_cpup(p++);
- if (count != 0) {
- /* we recognize only 1 currently: CREDS_VALUE */
- oa->count = 1;
+ if (!count)
+ return 0;
- oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
- if (!oa->data)
- return -ENOMEM;
+ /* we recognize only 1 currently: CREDS_VALUE */
+ oa->count = 1;
- creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
- if (!creds) {
- kfree(oa->data);
- return -ENOMEM;
- }
+ oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
+ if (!oa->data)
+ return -ENOMEM;
- oa->data[0].option.data = CREDS_VALUE;
- oa->data[0].option.len = sizeof(CREDS_VALUE);
- oa->data[0].value.data = (void *)creds;
- oa->data[0].value.len = 0;
+ creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+ if (!creds) {
+ kfree(oa->data);
+ return -ENOMEM;
}
+
+ oa->data[0].option.data = CREDS_VALUE;
+ oa->data[0].option.len = sizeof(CREDS_VALUE);
+ oa->data[0].value.data = (void *)creds;
+ oa->data[0].value.len = 0;
+
for (i = 0; i < count; i++) {
gssx_buffer dummy = { 0, NULL };
u32 length;
@@ -800,6 +792,7 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct gssx_res_accept_sec_context *res)
{
+ u32 value_follows;
int err;
/* res->status */
@@ -808,7 +801,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
return err;
/* res->context_handle */
- if (gssx_check_pointer(xdr)) {
+ err = gssx_dec_bool(xdr, &value_follows);
+ if (err)
+ return err;
+ if (value_follows) {
err = gssx_dec_ctx(xdr, res->context_handle);
if (err)
return err;
@@ -817,7 +813,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
}
/* res->output_token */
- if (gssx_check_pointer(xdr)) {
+ err = gssx_dec_bool(xdr, &value_follows);
+ if (err)
+ return err;
+ if (value_follows) {
err = gssx_dec_buffer(xdr, res->output_token);
if (err)
return err;
@@ -826,7 +825,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
}
/* res->delegated_cred_handle */
- if (gssx_check_pointer(xdr)) {
+ err = gssx_dec_bool(xdr, &value_follows);
+ if (err)
+ return err;
+ if (value_follows) {
/* we do not support upcall servers sending this data. */
return -EINVAL;
}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 871c73c9216..29b4ba93ab3 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1287,7 +1287,7 @@ static bool use_gss_proxy(struct net *net)
#ifdef CONFIG_PROC_FS
-static bool set_gss_proxy(struct net *net, int type)
+static int set_gss_proxy(struct net *net, int type)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int ret = 0;
@@ -1317,10 +1317,12 @@ static inline bool gssp_ready(struct sunrpc_net *sn)
return false;
}
-static int wait_for_gss_proxy(struct net *net)
+static int wait_for_gss_proxy(struct net *net, struct file *file)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+ if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
+ return -EAGAIN;
return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
}
@@ -1362,7 +1364,7 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
size_t len;
int ret;
- ret = wait_for_gss_proxy(net);
+ ret = wait_for_gss_proxy(net, file);
if (ret)
return ret;
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 7111a4c9113..74d948f5d5a 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -28,7 +28,11 @@ struct sunrpc_net {
wait_queue_head_t gssp_wq;
struct rpc_clnt *gssp_clnt;
int use_gss_proxy;
+ int pipe_version;
+ atomic_t pipe_users;
struct proc_dir_entry *use_gssp_proc;
+
+ unsigned int gssd_running;
};
extern int sunrpc_net_id;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index a9129f8d707..e7ce4b3eb0b 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -216,11 +216,14 @@ rpc_destroy_inode(struct inode *inode)
static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
+ struct net *net = inode->i_sb->s_fs_info;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
struct rpc_pipe *pipe;
int first_open;
int res = -ENXIO;
mutex_lock(&inode->i_mutex);
+ sn->gssd_running = 1;
pipe = RPC_I(inode)->pipe;
if (pipe == NULL)
goto out;
@@ -1069,6 +1072,8 @@ void rpc_pipefs_init_net(struct net *net)
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
mutex_init(&sn->pipefs_sb_lock);
+ sn->gssd_running = 1;
+ sn->pipe_version = -1;
}
/*
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f8529fc8e54..5356b120dbf 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -324,11 +324,17 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
* Note: If the task is ASYNC, and is being made runnable after sitting on an
* rpc_wait_queue, this must be called with the queue spinlock held to protect
* the wait queue operation.
+ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
+ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
+ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
+ * the RPC_TASK_RUNNING flag.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
+ bool need_wakeup = !rpc_test_and_set_running(task);
+
rpc_clear_queued(task);
- if (rpc_test_and_set_running(task))
+ if (!need_wakeup)
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index c3f9e1ef7f5..06bdf5a1082 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -810,11 +810,15 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
goto badcred;
argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */
argv->iov_len -= slen*4;
-
+ /*
+ * Note: we skip uid_valid()/gid_valid() checks here for
+ * backwards compatibility with clients that use -1 id's.
+ * Instead, -1 uid or gid is later mapped to the
+ * (export-specific) anonymous id by nfsd_setuser.
+ * Supplementary gid's will be left alone.
+ */
cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
- if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid))
- goto badcred;
slen = svc_getnl(argv); /* gids length */
if (slen > 16 || (len -= (slen + 2)*4) < 0)
goto badcred;
@@ -823,8 +827,6 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
return SVC_CLOSE;
for (i = 0; i < slen; i++) {
kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
- if (!gid_valid(kgid))
- goto badcred;
GROUP_AT(cred->cr_group_info, i) = kgid;
}
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index bcfda8921b5..0cf003dfa8f 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -64,6 +64,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
+ err = -EINVAL;
goto error;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 23cea0f7433..ea970b8002a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2557,11 +2557,12 @@ static void __xfrm_garbage_collect(struct net *net)
}
}
-static void xfrm_garbage_collect(struct net *net)
+void xfrm_garbage_collect(struct net *net)
{
flow_cache_flush();
__xfrm_garbage_collect(net);
}
+EXPORT_SYMBOL(xfrm_garbage_collect);
static void xfrm_garbage_collect_deferred(struct net *net)
{
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index aa778748c56..3f565e495ac 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1681,6 +1681,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
out:
xfrm_pol_put(xp);
+ if (delete && err == 0)
+ xfrm_garbage_collect(net);
return err;
}