Diffstat (limited to 'net'): 103 files changed, 1129 insertions, 731 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index b7889782047e..c1b92cab46c7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -163,7 +163,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, goto err_unlock; } - rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, + rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats, smp_processor_id()); rx_stats->rx_packets++; rx_stats->rx_bytes += skb->len; diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 4dd873e3a1bb..be1cb909d8c0 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -42,6 +42,8 @@ #include <net/9p/client.h> #include <net/9p/transport.h> +#include <linux/syscalls.h> /* killme */ + #define P9_PORT 564 #define MAX_SOCK_BUF (64*1024) #define MAXPOLLWADDR 2 @@ -788,24 +790,41 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) static int p9_socket_open(struct p9_client *client, struct socket *csocket) { - int fd, ret; + struct p9_trans_fd *p; + int ret, fd; + + p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); + if (!p) + return -ENOMEM; csocket->sk->sk_allocation = GFP_NOIO; fd = sock_map_fd(csocket, 0); if (fd < 0) { P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n"); + sock_release(csocket); + kfree(p); return fd; } - ret = p9_fd_open(client, fd, fd); - if (ret < 0) { - P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n"); + get_file(csocket->file); + get_file(csocket->file); + p->wr = p->rd = csocket->file; + client->trans = p; + client->status = Connected; + + sys_close(fd); /* still racy */ + + p->rd->f_flags |= O_NONBLOCK; + + p->conn = p9_conn_create(client); + if (IS_ERR(p->conn)) { + ret = PTR_ERR(p->conn); + p->conn = NULL; + kfree(p); + sockfd_put(csocket); sockfd_put(csocket); return ret; } - - ((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK; - return 0; } @@ -883,7 +902,6 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) struct socket *csocket; struct sockaddr_in sin_server; struct p9_fd_opts opts; - struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */ err = parse_opts(args, &opts); if (err < 0) @@ -897,12 +915,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) sin_server.sin_family = AF_INET; sin_server.sin_addr.s_addr = in_aton(addr); sin_server.sin_port = htons(opts.port); - sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket); + err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket); - if (!csocket) { + if (err) { P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n"); - err = -EIO; - goto error; + return err; } err = csocket->ops->connect(csocket, @@ -912,30 +929,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem connecting socket to %s\n", addr); - goto error; - } - - err = p9_socket_open(client, csocket); - if (err < 0) - goto error; - - p = (struct p9_trans_fd *) client->trans; - p->conn = p9_conn_create(client); - if (IS_ERR(p->conn)) { - err = PTR_ERR(p->conn); - p->conn = NULL; - goto error; - } - - return 0; - -error: - if (csocket) sock_release(csocket); + return err; + } - kfree(p); - - return err; + return p9_socket_open(client, csocket); } static int @@ -944,49 +942,33 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) int err; struct socket *csocket; struct sockaddr_un sun_server; - struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */ csocket = NULL; if (strlen(addr) > 
UNIX_PATH_MAX) { P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", addr); - err = -ENAMETOOLONG; - goto error; + return -ENAMETOOLONG; } sun_server.sun_family = PF_UNIX; strcpy(sun_server.sun_path, addr); - sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket); + err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket); + if (err < 0) { + P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n"); + return err; + } err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server, sizeof(struct sockaddr_un) - 1, 0); if (err < 0) { P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem connecting socket: %s: %d\n", addr, err); - goto error; - } - - err = p9_socket_open(client, csocket); - if (err < 0) - goto error; - - p = (struct p9_trans_fd *) client->trans; - p->conn = p9_conn_create(client); - if (IS_ERR(p->conn)) { - err = PTR_ERR(p->conn); - p->conn = NULL; - goto error; - } - - return 0; - -error: - if (csocket) sock_release(csocket); + return err; + } - kfree(p); - return err; + return p9_socket_open(client, csocket); } static int @@ -994,7 +976,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args) { int err; struct p9_fd_opts opts; - struct p9_trans_fd *p = NULL; /* this get allocated in p9_fd_open */ + struct p9_trans_fd *p; parse_opts(args, &opts); @@ -1005,21 +987,19 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args) err = p9_fd_open(client, opts.rfd, opts.wfd); if (err < 0) - goto error; + return err; p = (struct p9_trans_fd *) client->trans; p->conn = p9_conn_create(client); if (IS_ERR(p->conn)) { err = PTR_ERR(p->conn); p->conn = NULL; - goto error; + fput(p->rd); + fput(p->wr); + return err; } return 0; - -error: - kfree(p); - return err; } static struct p9_trans_module p9_tcp_trans = { diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index 9d4adfd22757..f2b3b56aa779 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c @@ -819,7 +819,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, ma = &ifa->address; else { /* We need to make a copy of the entry. 
*/ da.s_node = sa.s_node; - da.s_net = da.s_net; + da.s_net = sa.s_net; ma = &da; } diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 26a646d4eb32..c9230c398697 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -554,6 +554,12 @@ static const struct net_device_ops br2684_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +static const struct net_device_ops br2684_netdev_ops_routed = { + .ndo_start_xmit = br2684_start_xmit, + .ndo_set_mac_address = br2684_mac_addr, + .ndo_change_mtu = eth_change_mtu +}; + static void br2684_setup(struct net_device *netdev) { struct br2684_dev *brdev = BRPRIV(netdev); @@ -569,11 +575,10 @@ static void br2684_setup(struct net_device *netdev) static void br2684_setup_routed(struct net_device *netdev) { struct br2684_dev *brdev = BRPRIV(netdev); - brdev->net_dev = netdev; + brdev->net_dev = netdev; netdev->hard_header_len = 0; - - netdev->netdev_ops = &br2684_netdev_ops; + netdev->netdev_ops = &br2684_netdev_ops_routed; netdev->addr_len = 0; netdev->mtu = 1500; netdev->type = ARPHRD_PPP; diff --git a/net/atm/lec.c b/net/atm/lec.c index b2d644560323..42749b7b917c 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -62,7 +62,6 @@ static int lec_open(struct net_device *dev); static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct net_device *dev); static int lec_close(struct net_device *dev); -static void lec_init(struct net_device *dev); static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, const unsigned char *mac_addr); static int lec_arp_remove(struct lec_priv *priv, @@ -670,13 +669,6 @@ static const struct net_device_ops lec_netdev_ops = { .ndo_set_multicast_list = lec_set_multicast_list, }; - -static void lec_init(struct net_device *dev) -{ - dev->netdev_ops = &lec_netdev_ops; - printk("%s: Initialized!\n", dev->name); -} - static const unsigned char lec_ctrl_magic[] = { 0xff, 0x00, @@ -893,6 +885,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) dev_lec[i] = alloc_etherdev(size); if (!dev_lec[i]) return -ENOMEM; + dev_lec[i]->netdev_ops = &lec_netdev_ops; snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i); if (register_netdev(dev_lec[i])) { free_netdev(dev_lec[i]); @@ -901,7 +894,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) priv = netdev_priv(dev_lec[i]); priv->is_trdev = is_trdev; - lec_init(dev_lec[i]); } else { priv = netdev_priv(dev_lec[i]); if (priv->lecd) diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index bf706f83a5c9..14912600ec57 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c @@ -92,6 +92,12 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2 #endif } + /* + * There is one ref for the state machine; a caller needs + * one more to put it back, just like with the existing one. 
+ */ + ax25_cb_hold(ax25); + ax25_cb_add(ax25); ax25->state = AX25_STATE_1; diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 569750010fd3..6cf526d06e21 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -243,6 +243,39 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) input_sync(dev); } +static int __hidp_send_ctrl_message(struct hidp_session *session, + unsigned char hdr, unsigned char *data, int size) +{ + struct sk_buff *skb; + + BT_DBG("session %p data %p size %d", session, data, size); + + if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for new frame"); + return -ENOMEM; + } + + *skb_put(skb, 1) = hdr; + if (data && size > 0) + memcpy(skb_put(skb, size), data, size); + + skb_queue_tail(&session->ctrl_transmit, skb); + + return 0; +} + +static inline int hidp_send_ctrl_message(struct hidp_session *session, + unsigned char hdr, unsigned char *data, int size) +{ + int err; + + err = __hidp_send_ctrl_message(session, hdr, data, size); + + hidp_schedule(session); + + return err; +} + static int hidp_queue_report(struct hidp_session *session, unsigned char *data, int size) { @@ -282,7 +315,9 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count) { - if (hidp_queue_report(hid->driver_data, data, count)) + if (hidp_send_ctrl_message(hid->driver_data, + HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE, + data, count)) return -ENOMEM; return count; } @@ -307,39 +342,6 @@ static inline void hidp_del_timer(struct hidp_session *session) del_timer(&session->timer); } -static int __hidp_send_ctrl_message(struct hidp_session *session, - unsigned char hdr, unsigned char *data, int size) -{ - struct sk_buff *skb; - - BT_DBG("session %p data %p size %d", session, data, size); - - if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { - BT_ERR("Can't allocate memory for new frame"); - return -ENOMEM; - } - - *skb_put(skb, 1) = hdr; - if (data && size > 0) - memcpy(skb_put(skb, size), data, size); - - skb_queue_tail(&session->ctrl_transmit, skb); - - return 0; -} - -static inline int hidp_send_ctrl_message(struct hidp_session *session, - unsigned char hdr, unsigned char *data, int size) -{ - int err; - - err = __hidp_send_ctrl_message(session, hdr, data, size); - - hidp_schedule(session); - - return err; -} - static void hidp_process_handshake(struct hidp_session *session, unsigned char param) { @@ -770,7 +772,7 @@ static int hidp_setup_hid(struct hidp_session *session, hid = hid_allocate_device(); if (IS_ERR(hid)) - return PTR_ERR(session->hid); + return PTR_ERR(hid); session->hid = hid; session->req = req; diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 5129b88c8e5b..400efa26ddba 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -1212,6 +1212,7 @@ static void l2cap_monitor_timeout(unsigned long arg) bh_lock_sock(sk); if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk); + bh_unlock_sock(sk); return; } @@ -1367,7 +1368,6 @@ static int l2cap_ertm_send(struct sock *sk) while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) { - tx_skb = skb_clone(skb, GFP_ATOMIC); if (pi->remote_max_tx && bt_cb(skb)->retries == pi->remote_max_tx) { @@ -1375,6 +1375,8 @@ static int l2cap_ertm_send(struct sock *sk) break; } + tx_skb = skb_clone(skb, 
GFP_ATOMIC); + bt_cb(skb)->retries++; control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); @@ -3435,8 +3437,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str (pi->unacked_frames > 0)) __mod_retrans_timer(); - l2cap_ertm_send(sk); pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; + l2cap_ertm_send(sk); } break; @@ -3471,9 +3473,9 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; if (rx_control & L2CAP_CTRL_POLL) { - l2cap_retransmit_frame(sk, tx_seq); pi->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(sk); + l2cap_retransmit_frame(sk, tx_seq); l2cap_ertm_send(sk); if (pi->conn_state & L2CAP_CONN_WAIT_F) { pi->srej_save_reqseq = tx_seq; @@ -3517,7 +3519,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk struct l2cap_pinfo *pi; u16 control, len; u8 tx_seq; - int err; sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); if (!sk) { @@ -3569,13 +3570,11 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk goto drop; if (__is_iframe(control)) - err = l2cap_data_channel_iframe(sk, control, skb); + l2cap_data_channel_iframe(sk, control, skb); else - err = l2cap_data_channel_sframe(sk, control, skb); + l2cap_data_channel_sframe(sk, control, skb); - if (!err) - goto done; - break; + goto done; case L2CAP_MODE_STREAMING: control = get_unaligned_le16(skb->data); @@ -3601,7 +3600,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk else pi->expected_tx_seq = tx_seq + 1; - err = l2cap_sar_reassembly_sdu(sk, skb, control); + l2cap_sar_reassembly_sdu(sk, skb, control); goto done; diff --git a/net/compat.c b/net/compat.c index e1a56ade803b..a1fb1b079a82 100644 --- a/net/compat.c +++ b/net/compat.c @@ -754,26 +754,21 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, - struct timespec __user *timeout) + struct compat_timespec __user *timeout) { int datagrams; struct timespec ktspec; - struct compat_timespec __user *utspec; if (timeout == NULL) return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, NULL); - utspec = (struct compat_timespec __user *)timeout; - if (get_user(ktspec.tv_sec, &utspec->tv_sec) || - get_user(ktspec.tv_nsec, &utspec->tv_nsec)) + if (get_compat_timespec(&ktspec, timeout)) return -EFAULT; datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, &ktspec); - if (datagrams > 0 && - (put_user(ktspec.tv_sec, &utspec->tv_sec) || - put_user(ktspec.tv_nsec, &utspec->tv_nsec))) + if (datagrams > 0 && put_compat_timespec(&ktspec, timeout)) datagrams = -EFAULT; return datagrams; diff --git a/net/core/dev.c b/net/core/dev.c index c36a17aafcf3..be9924f60ec3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4771,21 +4771,23 @@ static void net_set_todo(struct net_device *dev) static void rollback_registered_many(struct list_head *head) { - struct net_device *dev; + struct net_device *dev, *tmp; BUG_ON(dev_boot_phase); ASSERT_RTNL(); - list_for_each_entry(dev, head, unreg_list) { + list_for_each_entry_safe(dev, tmp, head, unreg_list) { /* Some devices call without registering - * for initialization unwind. + * for initialization unwind. Remove those + * devices and proceed with the remaining. 
*/ if (dev->reg_state == NETREG_UNINITIALIZED) { pr_debug("unregister_netdevice: device %s/%p never " "was registered\n", dev->name, dev); WARN_ON(1); - return; + list_del(&dev->unreg_list); + continue; } BUG_ON(dev->reg_state != NETREG_REGISTERED); @@ -5033,6 +5035,11 @@ int register_netdevice(struct net_device *dev) rollback_registered(dev); dev->reg_state = NETREG_UNREGISTERED; } + /* + * Prevent userspace races by waiting until the network + * device is fully setup before sending notifications. + */ + rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); out: return ret; @@ -5595,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char /* Notify protocols, that a new device appeared. */ call_netdevice_notifiers(NETDEV_REGISTER, dev); + /* + * Prevent userspace races by waiting until the network + * device is fully setup before sending notifications. + */ + rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); + synchronize_net(); err = 0; out: diff --git a/net/core/pktgen.c b/net/core/pktgen.c index a23b45f08ec9..de0c2c726420 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -250,8 +250,7 @@ struct pktgen_dev { __u64 count; /* Default No packets to send */ __u64 sofar; /* How many pkts we've sent so far */ __u64 tx_bytes; /* How many bytes we've transmitted */ - __u64 errors; /* Errors when trying to transmit, - pkts will be re-sent */ + __u64 errors; /* Errors when trying to transmit, */ /* runtime counters relating to clone_skb */ @@ -3465,6 +3464,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) pkt_dev->seq_num++; pkt_dev->tx_bytes += pkt_dev->last_pkt_size; break; + case NET_XMIT_DROP: + case NET_XMIT_CN: + case NET_XMIT_POLICED: + /* skb has been consumed */ + pkt_dev->errors++; + break; default: /* Drivers are not supposed to return other values! */ if (net_ratelimit()) pr_info("pktgen: %s xmit error: %d\n", diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 33148a568199..794bcb897ff0 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi case NETDEV_UNREGISTER: rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); break; - case NETDEV_REGISTER: - rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); - break; case NETDEV_UP: case NETDEV_DOWN: rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); break; + case NETDEV_POST_INIT: + case NETDEV_REGISTER: case NETDEV_CHANGE: case NETDEV_GOING_DOWN: + case NETDEV_UNREGISTER_BATCH: break; default: rtmsg_ifinfo(RTM_NEWLINK, dev, 0); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index bfa3e7865a8c..93c4e060c91e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -93,7 +93,7 @@ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe, /* Pipe buffer operations for a socket. 
*/ -static struct pipe_buf_operations sock_pipe_buf_ops = { +static const struct pipe_buf_operations sock_pipe_buf_ops = { .can_merge = 0, .map = generic_pipe_buf_map, .unmap = generic_pipe_buf_unmap, diff --git a/net/core/sock.c b/net/core/sock.c index 76ff58d43e26..e1f6f225f012 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1205,6 +1205,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) if (newsk->sk_prot->sockets_allocated) percpu_counter_inc(newsk->sk_prot->sockets_allocated); + + if (sock_flag(newsk, SOCK_TIMESTAMP) || + sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE)) + net_enable_timestamp(); } out: return newsk; diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index f3e9ba1cfd01..57dfb9c8c4f2 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c @@ -77,34 +77,24 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, return err; } -static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) +static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...) { struct kmem_cache *slab; - char slab_name_fmt[32], *slab_name; va_list args; va_start(args, fmt); vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); va_end(args); - slab_name = kstrdup(slab_name_fmt, GFP_KERNEL); - if (slab_name == NULL) - return NULL; - slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0, + slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, SLAB_HWCACHE_ALIGN, NULL); - if (slab == NULL) - kfree(slab_name); return slab; } static void ccid_kmem_cache_destroy(struct kmem_cache *slab) { - if (slab != NULL) { - const char *name = kmem_cache_name(slab); - + if (slab != NULL) kmem_cache_destroy(slab); - kfree(name); - } } static int ccid_activate(struct ccid_operations *ccid_ops) @@ -113,6 +103,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops) ccid_ops->ccid_hc_rx_slab = ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size, + ccid_ops->ccid_hc_rx_slab_name, "ccid%u_hc_rx_sock", ccid_ops->ccid_id); if (ccid_ops->ccid_hc_rx_slab == NULL) @@ -120,6 +111,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops) ccid_ops->ccid_hc_tx_slab = ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size, + ccid_ops->ccid_hc_tx_slab_name, "ccid%u_hc_tx_sock", ccid_ops->ccid_id); if (ccid_ops->ccid_hc_tx_slab == NULL) diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index facedd20b531..269958bf7fe9 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -49,6 +49,8 @@ struct ccid_operations { const char *ccid_name; struct kmem_cache *ccid_hc_rx_slab, *ccid_hc_tx_slab; + char ccid_hc_rx_slab_name[32]; + char ccid_hc_tx_slab_name[32]; __u32 ccid_hc_rx_obj_size, ccid_hc_tx_obj_size; /* Interface Routines */ diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index efbcfdc12796..dad7bc4878e0 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -408,7 +408,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, dccp_sync_mss(newsk, dst_mtu(dst)); - __inet_hash_nolisten(newsk); + __inet_hash_nolisten(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 6574215a1f51..baf05cf43c28 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -46,7 +46,7 @@ static void dccp_v6_hash(struct sock *sk) return; } local_bh_disable(); - __inet6_hash(sk); + __inet6_hash(sk, NULL); local_bh_enable(); } } @@ -644,7 +644,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, newinet->inet_daddr = 
newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; - __inet6_hash(newsk); + __inet6_hash(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; diff --git a/net/dccp/probe.c b/net/dccp/probe.c index dc328425fa20..bace1d8cbcfd 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -43,7 +43,7 @@ static int bufsize = 64 * 1024; static const char procname[] = "dccpprobe"; static struct { - struct kfifo *fifo; + struct kfifo fifo; spinlock_t lock; wait_queue_head_t wait; struct timespec tstart; @@ -67,7 +67,7 @@ static void printl(const char *fmt, ...) len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); va_end(args); - kfifo_put(dccpw.fifo, tbuf, len); + kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock); wake_up(&dccpw.wait); } @@ -109,7 +109,7 @@ static struct jprobe dccp_send_probe = { static int dccpprobe_open(struct inode *inode, struct file *file) { - kfifo_reset(dccpw.fifo); + kfifo_reset(&dccpw.fifo); getnstimeofday(&dccpw.tstart); return 0; } @@ -131,11 +131,11 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf, return -ENOMEM; error = wait_event_interruptible(dccpw.wait, - __kfifo_len(dccpw.fifo) != 0); + kfifo_len(&dccpw.fifo) != 0); if (error) goto out_free; - cnt = kfifo_get(dccpw.fifo, tbuf, len); + cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock); error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; out_free: @@ -156,14 +156,13 @@ static __init int dccpprobe_init(void) init_waitqueue_head(&dccpw.wait); spin_lock_init(&dccpw.lock); - dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock); - if (IS_ERR(dccpw.fifo)) - return PTR_ERR(dccpw.fifo); - + if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL)) + return ret; if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) goto err0; - ret = register_jprobe(&dccp_send_probe); + ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0), + "dccp"); if (ret) goto err1; @@ -172,14 +171,14 @@ static __init int dccpprobe_init(void) err1: proc_net_remove(&init_net, procname); err0: - kfifo_free(dccpw.fifo); + kfifo_free(&dccpw.fifo); return ret; } module_init(dccpprobe_init); static __exit void dccpprobe_exit(void) { - kfifo_free(dccpw.fifo); + kfifo_free(&dccpw.fifo); proc_net_remove(&init_net, procname); unregister_jprobe(&dccp_send_probe); diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 70491d9035eb..0c94a1ac2946 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -166,7 +166,7 @@ config IP_PNP_DHCP If unsure, say Y. Note that if you want to use DHCP, a DHCP server must be operating on your network. Read - <file:Documentation/filesystems/nfsroot.txt> for details. + <file:Documentation/filesystems/nfs/nfsroot.txt> for details. config IP_PNP_BOOTP bool "IP: BOOTP support" @@ -181,7 +181,7 @@ config IP_PNP_BOOTP does BOOTP itself, providing all necessary information on the kernel command line, you can say N here. If unsure, say Y. Note that if you want to use BOOTP, a BOOTP server must be operating on your network. - Read <file:Documentation/filesystems/nfsroot.txt> for details. + Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details. config IP_PNP_RARP bool "IP: RARP support" @@ -194,7 +194,7 @@ config IP_PNP_RARP older protocol which is being obsoleted by BOOTP and DHCP), say Y here. Note that if you want to use RARP, a RARP server must be operating on your network. Read - <file:Documentation/filesystems/nfsroot.txt> for details. + <file:Documentation/filesystems/nfs/nfsroot.txt> for details. # not yet ready.. 
# bool ' IP: ARP support' CONFIG_IP_PNP_ARP diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 5cdbc102a418..040c4f05b653 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1397,6 +1397,7 @@ static struct devinet_sysctl_table { DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE, "accept_source_route"), DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"), + DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"), DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"), DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"), DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"), diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 3323168ee52d..82dbf711d6d0 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -252,6 +252,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, no_addr = in_dev->ifa_list == NULL; rpf = IN_DEV_RPFILTER(in_dev); accept_local = IN_DEV_ACCEPT_LOCAL(in_dev); + if (mark && !IN_DEV_SRC_VMARK(in_dev)) + fl.mark = 0; } rcu_read_unlock(); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index bdb78dd180ce..1aaa8110d84b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -368,7 +368,7 @@ static int inet_diag_bc_run(const void *bc, int len, yes = entry->sport >= op[1].no; break; case INET_DIAG_BC_S_LE: - yes = entry->dport <= op[1].no; + yes = entry->sport <= op[1].no; break; case INET_DIAG_BC_D_GE: yes = entry->dport >= op[1].no; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 21e5e32d8c60..2b79377b468d 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -351,12 +351,13 @@ static inline u32 inet_sk_port_offset(const struct sock *sk) inet->inet_dport); } -void __inet_hash_nolisten(struct sock *sk) +int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct hlist_nulls_head *list; spinlock_t *lock; struct inet_ehash_bucket *head; + int twrefcnt = 0; WARN_ON(!sk_unhashed(sk)); @@ -367,8 +368,13 @@ void __inet_hash_nolisten(struct sock *sk) spin_lock(lock); __sk_nulls_add_node_rcu(sk, list); + if (tw) { + WARN_ON(sk->sk_hash != tw->tw_hash); + twrefcnt = inet_twsk_unhash(tw); + } spin_unlock(lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + return twrefcnt; } EXPORT_SYMBOL_GPL(__inet_hash_nolisten); @@ -378,7 +384,7 @@ static void __inet_hash(struct sock *sk) struct inet_listen_hashbucket *ilb; if (sk->sk_state != TCP_LISTEN) { - __inet_hash_nolisten(sk); + __inet_hash_nolisten(sk, NULL); return; } @@ -427,7 +433,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u32 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **), - void (*hash)(struct sock *sk)) + int (*hash)(struct sock *sk, struct inet_timewait_sock *twp)) { struct inet_hashinfo *hinfo = death_row->hashinfo; const unsigned short snum = inet_sk(sk)->inet_num; @@ -435,6 +441,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct inet_bind_bucket *tb; int ret; struct net *net = sock_net(sk); + int twrefcnt = 1; if (!snum) { int i, remaining, low, high, port; @@ -493,13 +500,18 @@ ok: inet_bind_hash(sk, tb, port); if (sk_unhashed(sk)) { inet_sk(sk)->inet_sport = htons(port); - hash(sk); + twrefcnt += hash(sk, tw); } + if (tw) + twrefcnt += inet_twsk_bind_unhash(tw, hinfo); spin_unlock(&head->lock); if (tw) { inet_twsk_deschedule(tw, death_row); - inet_twsk_put(tw); + while (twrefcnt) { + 
twrefcnt--; + inet_twsk_put(tw); + } } ret = 0; @@ -510,7 +522,7 @@ ok: tb = inet_csk(sk)->icsk_bind_hash; spin_lock_bh(&head->lock); if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { - hash(sk); + hash(sk, NULL); spin_unlock_bh(&head->lock); return 0; } else { diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 0fdf45e4c90c..cc94cc2d8b2d 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -15,9 +15,13 @@ #include <net/ip.h> -/* - * unhash a timewait socket from established hash - * lock must be hold by caller +/** + * inet_twsk_unhash - unhash a timewait socket from established hash + * @tw: timewait socket + * + * unhash a timewait socket from established hash, if hashed. + * ehash lock must be held by caller. + * Returns 1 if caller should call inet_twsk_put() after lock release. */ int inet_twsk_unhash(struct inet_timewait_sock *tw) { @@ -26,6 +30,37 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw) hlist_nulls_del_rcu(&tw->tw_node); sk_nulls_node_init(&tw->tw_node); + /* + * We cannot call inet_twsk_put() ourself under lock, + * caller must call it for us. + */ + return 1; +} + +/** + * inet_twsk_bind_unhash - unhash a timewait socket from bind hash + * @tw: timewait socket + * @hashinfo: hashinfo pointer + * + * unhash a timewait socket from bind hash, if hashed. + * bind hash lock must be held by caller. + * Returns 1 if caller should call inet_twsk_put() after lock release. + */ +int inet_twsk_bind_unhash(struct inet_timewait_sock *tw, + struct inet_hashinfo *hashinfo) +{ + struct inet_bind_bucket *tb = tw->tw_tb; + + if (!tb) + return 0; + + __hlist_del(&tw->tw_bind_node); + tw->tw_tb = NULL; + inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); + /* + * We cannot call inet_twsk_put() ourself under lock, + * caller must call it for us. + */ return 1; } @@ -34,7 +69,6 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo) { struct inet_bind_hashbucket *bhead; - struct inet_bind_bucket *tb; int refcnt; /* Unlink from established hashes. */ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash); @@ -46,15 +80,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, /* Disassociate with bind bucket. 
*/ bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num, hashinfo->bhash_size)]; + spin_lock(&bhead->lock); - tb = tw->tw_tb; - if (tb) { - __hlist_del(&tw->tw_bind_node); - tw->tw_tb = NULL; - inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); - refcnt++; - } + refcnt += inet_twsk_bind_unhash(tw, hashinfo); spin_unlock(&bhead->lock); + #ifdef SOCK_REFCNT_DEBUG if (atomic_read(&tw->tw_refcnt) != 1) { printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n", @@ -126,7 +156,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, /* * Notes : - * - We initially set tw_refcnt to 0 in inet_twsk_alloc() + * - We initially set tw_refcnt to 0 in inet_twsk_alloc() * - We add one reference for the bhash link * - We add one reference for the ehash link * - We want this refcnt update done before allowing other @@ -136,7 +166,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, spin_unlock(lock); } - EXPORT_SYMBOL_GPL(__inet_twsk_hashdance); struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state) @@ -177,7 +206,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat return tw; } - EXPORT_SYMBOL_GPL(inet_twsk_alloc); /* Returns non-zero if quota exceeded. */ @@ -256,7 +284,6 @@ void inet_twdr_hangman(unsigned long data) out: spin_unlock(&twdr->death_lock); } - EXPORT_SYMBOL_GPL(inet_twdr_hangman); void inet_twdr_twkill_work(struct work_struct *work) @@ -287,7 +314,6 @@ void inet_twdr_twkill_work(struct work_struct *work) spin_unlock_bh(&twdr->death_lock); } } - EXPORT_SYMBOL_GPL(inet_twdr_twkill_work); /* These are always called from BH context. See callers in @@ -307,7 +333,6 @@ void inet_twsk_deschedule(struct inet_timewait_sock *tw, spin_unlock(&twdr->death_lock); __inet_twsk_kill(tw, twdr->hashinfo); } - EXPORT_SYMBOL(inet_twsk_deschedule); void inet_twsk_schedule(struct inet_timewait_sock *tw, @@ -388,7 +413,6 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, mod_timer(&twdr->tw_timer, jiffies + twdr->period); spin_unlock(&twdr->death_lock); } - EXPORT_SYMBOL_GPL(inet_twsk_schedule); void inet_twdr_twcal_tick(unsigned long data) @@ -449,7 +473,6 @@ out: #endif spin_unlock(&twdr->death_lock); } - EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick); void inet_twsk_purge(struct inet_hashinfo *hashinfo, diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index e34013a78ef4..3451799e3dbf 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -254,7 +254,7 @@ int ip_mc_output(struct sk_buff *skb) */ if (rt->rt_flags&RTCF_MULTICAST) { - if ((!sk || inet_sk(sk)->mc_loop) + if (sk_mc_loop(sk) #ifdef CONFIG_IP_MROUTE /* Small optimization: do not loopback not local frames, which returned after forwarding; they will be dropped diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 4e08b7f2331c..10a6a604bf32 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1446,7 +1446,7 @@ late_initcall(ip_auto_config); /* * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel - * command line parameter. See Documentation/filesystems/nfsroot.txt. + * command line parameter. See Documentation/filesystems/nfs/nfsroot.txt. 
*/ static int __init ic_proto_name(char *name) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e446496f564f..d62b05d33384 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -586,7 +586,9 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net) { remove_proc_entry("rt_cache", net->proc_net_stat); remove_proc_entry("rt_cache", net->proc_net); +#ifdef CONFIG_NET_CLS_ROUTE remove_proc_entry("rt_acct", net->proc_net); +#endif } static struct pernet_operations ip_rt_proc_ops __net_initdata = { diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 26399ad2a289..66fd80ef2473 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); + /* check for timestamp cookie support */ + memset(&tcp_opt, 0, sizeof(tcp_opt)); + tcp_parse_options(skb, &tcp_opt, &hash_location, 0); + + if (tcp_opt.saw_tstamp) + cookie_check_timestamp(&tcp_opt); + ret = NULL; req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ if (!req) @@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; ireq->ecn_ok = 0; + ireq->snd_wscale = tcp_opt.snd_wscale; + ireq->rcv_wscale = tcp_opt.rcv_wscale; + ireq->sack_ok = tcp_opt.sack_ok; + ireq->wscale_ok = tcp_opt.wscale_ok; + ireq->tstamp_ok = tcp_opt.saw_tstamp; + req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) @@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, } } - /* check for timestamp cookie support */ - memset(&tcp_opt, 0, sizeof(tcp_opt)); - tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst); - - if (tcp_opt.saw_tstamp) - cookie_check_timestamp(&tcp_opt); - - ireq->snd_wscale = tcp_opt.snd_wscale; - ireq->rcv_wscale = tcp_opt.rcv_wscale; - ireq->sack_ok = tcp_opt.sack_ok; - ireq->wscale_ok = tcp_opt.wscale_ok; - ireq->tstamp_ok = tcp_opt.saw_tstamp; - req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; - /* Try to redo what tcp_v4_send_synack did. */ req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c8666b70cde0..b0a26bb25e2e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2540,11 +2540,6 @@ static int do_tcp_getsockopt(struct sock *sk, int level, ctd.tcpct_cookie_desired = cvp->cookie_desired; ctd.tcpct_s_data_desired = cvp->s_data_desired; - /* Cookie(s) saved, return as nonce */ - if (sizeof(ctd.tcpct_value) < cvp->cookie_pair_size) { - /* impossible? */ - return -EINVAL; - } memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0], cvp->cookie_pair_size); ctd.tcpct_used = cvp->cookie_pair_size; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 57ae96a04220..28e029632493 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2717,6 +2717,35 @@ static void tcp_try_undo_dsack(struct sock *sk) } } +/* We can clear retrans_stamp when there are no retransmissions in the + * window. It would seem that it is trivially available for us in + * tp->retrans_out, however, that kind of assumptions doesn't consider + * what will happen if errors occur when sending retransmission for the + * second time. ...It could the that such segment has only + * TCPCB_EVER_RETRANS set at the present time. 
It seems that checking + * the head skb is enough except for some reneging corner cases that + * are not worth the effort. + * + * Main reason for all this complexity is the fact that connection dying + * time now depends on the validity of the retrans_stamp, in particular, + * that successive retransmissions of a segment must not advance + * retrans_stamp under any conditions. + */ +static int tcp_any_retrans_done(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; + + if (tp->retrans_out) + return 1; + + skb = tcp_write_queue_head(sk); + if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) + return 1; + + return 0; +} + /* Undo during fast recovery after partial ACK. */ static int tcp_try_undo_partial(struct sock *sk, int acked) @@ -2729,7 +2758,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked) /* Plain luck! Hole if filled with delayed * packet, rather than with a retransmit. */ - if (tp->retrans_out == 0) + if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); @@ -2788,7 +2817,7 @@ static void tcp_try_keep_open(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); int state = TCP_CA_Open; - if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker) + if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { @@ -2803,7 +2832,7 @@ static void tcp_try_to_open(struct sock *sk, int flag) tcp_verify_left_out(tp); - if (!tp->frto_counter && tp->retrans_out == 0) + if (!tp->frto_counter && !tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; if (flag & FLAG_ECE) @@ -3698,7 +3727,7 @@ old_ack: * the fast version below fails. */ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, - u8 **hvpp, int estab, struct dst_entry *dst) + u8 **hvpp, int estab) { unsigned char *ptr; struct tcphdr *th = tcp_hdr(skb); @@ -3737,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, break; case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW && th->syn && - !estab && sysctl_tcp_window_scaling && - !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) { + !estab && sysctl_tcp_window_scaling) { __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > 14) { @@ -3754,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, case TCPOPT_TIMESTAMP: if ((opsize == TCPOLEN_TIMESTAMP) && ((estab && opt_rx->tstamp_ok) || - (!estab && sysctl_tcp_timestamps && - !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) { + (!estab && sysctl_tcp_timestamps))) { opt_rx->saw_tstamp = 1; opt_rx->rcv_tsval = get_unaligned_be32(ptr); opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); @@ -3763,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, break; case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM && th->syn && - !estab && sysctl_tcp_sack && - !dst_feature(dst, RTAX_FEATURE_NO_SACK)) { + !estab && sysctl_tcp_sack) { opt_rx->sack_ok = 1; tcp_sack_reset(opt_rx); } @@ -3849,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, if (tcp_parse_aligned_timestamp(tp, th)) return 1; } - tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); + tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); return 1; } @@ -4104,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = 
tcp_sk(sk); - struct dst_entry *dst = __sk_dst_get(sk); - if (tcp_is_sack(tp) && sysctl_tcp_dsack && - !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) { + if (tcp_is_sack(tp) && sysctl_tcp_dsack) { int mib_idx; if (before(seq, tp->rcv_nxt)) @@ -4136,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - struct dst_entry *dst = __sk_dst_get(sk); if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); - if (tcp_is_sack(tp) && sysctl_tcp_dsack && - !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) { + if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) @@ -5399,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, u8 *hash_location; struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - struct dst_entry *dst = __sk_dst_get(sk); struct tcp_cookie_values *cvp = tp->cookie_values; int saved_clamp = tp->rx_opt.mss_clamp; - tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst); + tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0); if (th->ack) { /* rfc793: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 29002ab26e0d..65b8ebfd078a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; #endif - ireq = inet_rsk(req); - ireq->loc_addr = daddr; - ireq->rmt_addr = saddr; - ireq->no_srccheck = inet_sk(sk)->transparent; - ireq->opt = tcp_v4_save_options(sk, skb); - - dst = inet_csk_route_req(sk, req); - if(!dst) - goto drop_and_free; - tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = TCP_MSS_DEFAULT; tmp_opt.user_mss = tp->rx_opt.user_mss; - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); + tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.cookie_plus > 0 && tmp_opt.saw_tstamp && @@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb); + ireq = inet_rsk(req); + ireq->loc_addr = daddr; + ireq->rmt_addr = saddr; + ireq->no_srccheck = inet_sk(sk)->transparent; + ireq->opt = tcp_v4_save_options(sk, skb); + if (security_inet_conn_request(sk, skb, req)) - goto drop_and_release; + goto drop_and_free; if (!want_cookie) TCP_ECN_create_request(req, tcp_hdr(skb)); @@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) */ if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle && + (dst = inet_csk_route_req(sk, req)) != NULL && (peer = rt_get_peer((struct rtable *)dst)) != NULL && peer->v4daddr == saddr) { if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && @@ -1464,7 +1461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, } #endif - __inet_hash_nolisten(newsk); + __inet_hash_nolisten(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 87accec8d097..f206ee5dda80 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock 
*)tw); int paws_reject = 0; + tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { - tmp_opt.tstamp_ok = 1; - tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL); + tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.saw_tstamp) { tmp_opt.ts_recent = tcptw->tw_ts_recent; @@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); int paws_reject = 0; - if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) { - tmp_opt.tstamp_ok = 1; - tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL); + tmp_opt.saw_tstamp = 0; + if (th->doff > (sizeof(struct tcphdr)>>2)) { + tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.saw_tstamp) { tmp_opt.ts_recent = req->ts_recent; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 93316a96d820..383ce237640f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, struct tcp_md5sig_key **md5) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_cookie_values *cvp = tp->cookie_values; - struct dst_entry *dst = __sk_dst_get(sk); unsigned remaining = MAX_TCP_OPTION_SPACE; u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? tcp_cookie_size_check(cvp->cookie_desired) : @@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, opts->mss = tcp_advertise_mss(sk); remaining -= TCPOLEN_MSS_ALIGNED; - if (likely(sysctl_tcp_timestamps && - !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) && - *md5 == NULL)) { + if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { opts->options |= OPTION_TS; opts->tsval = TCP_SKB_CB(skb)->when; opts->tsecr = tp->rx_opt.ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } - if (likely(sysctl_tcp_window_scaling && - !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) { + if (likely(sysctl_tcp_window_scaling)) { opts->ws = tp->rx_opt.rcv_wscale; opts->options |= OPTION_WSCALE; remaining -= TCPOLEN_WSCALE_ALIGNED; } - if (likely(sysctl_tcp_sack && - !dst_feature(dst, RTAX_FEATURE_NO_SACK))) { + if (likely(sysctl_tcp_sack)) { opts->options |= OPTION_SACK_ADVERTISE; if (unlikely(!(OPTION_TS & opts->options))) remaining -= TCPOLEN_SACKPERM_ALIGNED; @@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk) * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. */ tp->tcp_header_len = sizeof(struct tcphdr) + - (sysctl_tcp_timestamps && - (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ? - TCPOLEN_TSTAMP_ALIGNED : 0)); + (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); #ifdef CONFIG_TCP_MD5SIG if (tp->af_specific->md5_lookup(sk, sk) != NULL) @@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk) tp->advmss - (tp->rx_opt.ts_recent_stamp ? 
tp->tcp_header_len - sizeof(struct tcphdr) : 0), &tp->rcv_wnd, &tp->window_clamp, - (sysctl_tcp_window_scaling && - !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)), + sysctl_tcp_window_scaling, &rcv_wscale); tp->rx_opt.rcv_wscale = rcv_wscale; diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index bb110c5ce1d2..9bc805df95d2 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -39,9 +39,9 @@ static int port __read_mostly = 0; MODULE_PARM_DESC(port, "Port to match (0=all)"); module_param(port, int, 0); -static int bufsize __read_mostly = 4096; +static unsigned int bufsize __read_mostly = 4096; MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); -module_param(bufsize, int, 0); +module_param(bufsize, uint, 0); static int full __read_mostly; MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); @@ -75,12 +75,12 @@ static struct { static inline int tcp_probe_used(void) { - return (tcp_probe.head - tcp_probe.tail) % bufsize; + return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); } static inline int tcp_probe_avail(void) { - return bufsize - tcp_probe_used(); + return bufsize - tcp_probe_used() - 1; } /* @@ -116,7 +116,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, p->ssthresh = tcp_current_ssthresh(sk); p->srtt = tp->srtt >> 3; - tcp_probe.head = (tcp_probe.head + 1) % bufsize; + tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); } tcp_probe.lastcwnd = tp->snd_cwnd; spin_unlock(&tcp_probe.lock); @@ -149,7 +149,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file) static int tcpprobe_sprint(char *tbuf, int n) { const struct tcp_log *p - = tcp_probe.log + tcp_probe.tail % bufsize; + = tcp_probe.log + tcp_probe.tail; struct timespec tv = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); @@ -192,7 +192,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, width = tcpprobe_sprint(tbuf, sizeof(tbuf)); if (cnt + width < len) - tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; + tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1); spin_unlock_bh(&tcp_probe.lock); @@ -222,9 +222,10 @@ static __init int tcpprobe_init(void) init_waitqueue_head(&tcp_probe.wait); spin_lock_init(&tcp_probe.lock); - if (bufsize < 0) + if (bufsize == 0) return -EINVAL; + bufsize = roundup_pow_of_two(bufsize); tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); if (!tcp_probe.log) goto err0; @@ -236,7 +237,7 @@ static __init int tcpprobe_init(void) if (ret) goto err1; - pr_info("TCP probe registered (port=%d)\n", port); + pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize); return 0; err1: proc_net_remove(&init_net, procname); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 8353a538cd4c..8816a20c2597 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -132,6 +132,35 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) } } +/* This function calculates a "timeout" which is equivalent to the timeout of a + * TCP connection after "boundary" unsucessful, exponentially backed-off + * retransmissions with an initial RTO of TCP_RTO_MIN. 
+ */ +static bool retransmits_timed_out(struct sock *sk, + unsigned int boundary) +{ + unsigned int timeout, linear_backoff_thresh; + unsigned int start_ts; + + if (!inet_csk(sk)->icsk_retransmits) + return false; + + if (unlikely(!tcp_sk(sk)->retrans_stamp)) + start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; + else + start_ts = tcp_sk(sk)->retrans_stamp; + + linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); + + if (boundary <= linear_backoff_thresh) + timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; + else + timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + + (boundary - linear_backoff_thresh) * TCP_RTO_MAX; + + return (tcp_time_stamp - start_ts) >= timeout; +} + /* A write timeout has occurred. Process the after effects. */ static int tcp_write_timeout(struct sock *sk) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1f9534846ca9..f0126fdd7e04 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, * force rand to be an odd multiple of UDP_HTABLE_SIZE */ rand = (rand | 1) * (udptable->mask + 1); - for (last = first + udptable->mask + 1; - first != last; - first++) { + last = first + udptable->mask + 1; + do { hslot = udp_hashslot(udptable, net, first); bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); @@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, snum += rand; } while (snum != first); spin_unlock_bh(&hslot->lock); - } + } while (++first != last); goto fail; } else { hslot = udp_hashslot(udptable, net, snum); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 8c08a28d8f83..67107d63c1cd 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -15,7 +15,6 @@ #include <net/xfrm.h> #include <net/ip.h> -static struct dst_ops xfrm4_dst_ops; static struct xfrm_policy_afinfo xfrm4_policy_afinfo; static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, @@ -190,8 +189,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) static inline int xfrm4_garbage_collect(struct dst_ops *ops) { - xfrm4_policy_afinfo.garbage_collect(&init_net); - return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2); + struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops); + + xfrm4_policy_afinfo.garbage_collect(net); + return (atomic_read(&ops->entries) > ops->gc_thresh * 2); } static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) @@ -268,7 +269,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { static struct ctl_table xfrm4_policy_table[] = { { .procname = "xfrm4_gc_thresh", - .data = &xfrm4_dst_ops.gc_thresh, + .data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, @@ -295,8 +296,6 @@ static void __exit xfrm4_policy_fini(void) void __init xfrm4_init(int rt_max_size) { - xfrm4_state_init(); - xfrm4_policy_init(); /* * Select a default value for the gc_thresh based on the main route * table hash size. 
It seems to me the worst case scenario is when @@ -308,6 +307,9 @@ void __init xfrm4_init(int rt_max_size) * and start cleaning when were 1/2 full */ xfrm4_dst_ops.gc_thresh = rt_max_size/2; + + xfrm4_state_init(); + xfrm4_policy_init(); #ifdef CONFIG_SYSCTL sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, xfrm4_policy_table); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index df159fffe4bc..4bac362b1335 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -559,6 +559,11 @@ static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev); } +static inline struct net *ipv6_skb_net(struct sk_buff *skb) +{ + return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev); +} + /* Router Alert as of RFC 2711 */ static int ipv6_hop_ra(struct sk_buff *skb, int optoff) @@ -580,8 +585,8 @@ static int ipv6_hop_ra(struct sk_buff *skb, int optoff) static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); + struct net *net = ipv6_skb_net(skb); u32 pkt_len; - struct net *net = dev_net(skb_dst(skb)->dev); if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index c813e294ec0c..633a6c266136 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -22,9 +22,10 @@ #include <net/inet6_hashtables.h> #include <net/ip.h> -void __inet6_hash(struct sock *sk) +int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; + int twrefcnt = 0; WARN_ON(!sk_unhashed(sk)); @@ -45,10 +46,15 @@ void __inet6_hash(struct sock *sk) lock = inet_ehash_lockp(hashinfo, hash); spin_lock(lock); __sk_nulls_add_node_rcu(sk, list); + if (tw) { + WARN_ON(sk->sk_hash != tw->tw_hash); + twrefcnt = inet_twsk_unhash(tw); + } spin_unlock(lock); } sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + return twrefcnt; } EXPORT_SYMBOL(__inet6_hash); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index cd48801a8d6f..eb6d09728633 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -121,10 +121,9 @@ static int ip6_output2(struct sk_buff *skb) skb->dev = dev; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { - struct ipv6_pinfo* np = skb->sk ? 
inet6_sk(skb->sk) : NULL; struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); - if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && + if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && ((mroute6_socket(dev_net(dev)) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 312c20adc83f..624a54832a7c 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -63,6 +63,7 @@ struct nf_ct_frag6_queue struct inet_frag_queue q; __be32 id; /* fragment id */ + u32 user; struct in6_addr saddr; struct in6_addr daddr; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 3b3a95607125..2cddea3bd6be 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -708,7 +708,8 @@ static void ip6_frags_ns_sysctl_unregister(struct net *net) table = net->ipv6.sysctl.frags_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); - kfree(table); + if (!net_eq(net, &init_net)) + kfree(table); } static struct ctl_table_header *ip6_ctl_header; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index db3b27303890..c2bd74c5f8d9 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2630,6 +2630,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net) table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity; table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; + table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; } return table; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 5b9af508b8f2..7208a06576c6 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); + /* check for timestamp cookie support */ + memset(&tcp_opt, 0, sizeof(tcp_opt)); + tcp_parse_options(skb, &tcp_opt, &hash_location, 0); + + if (tcp_opt.saw_tstamp) + cookie_check_timestamp(&tcp_opt); + ret = NULL; req = inet6_reqsk_alloc(&tcp6_request_sock_ops); if (!req) @@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) req->expires = 0UL; req->retrans = 0; ireq->ecn_ok = 0; + ireq->snd_wscale = tcp_opt.snd_wscale; + ireq->rcv_wscale = tcp_opt.rcv_wscale; + ireq->sack_ok = tcp_opt.sack_ok; + ireq->wscale_ok = tcp_opt.wscale_ok; + ireq->tstamp_ok = tcp_opt.saw_tstamp; + req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; @@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) goto out_free; } - /* check for timestamp cookie support */ - memset(&tcp_opt, 0, sizeof(tcp_opt)); - tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst); - - if (tcp_opt.saw_tstamp) - cookie_check_timestamp(&tcp_opt); - - req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; - - ireq->snd_wscale = tcp_opt.snd_wscale; - ireq->rcv_wscale = tcp_opt.rcv_wscale; - ireq->sack_ok = tcp_opt.sack_ok; - ireq->wscale_ok = tcp_opt.wscale_ok; - ireq->tstamp_ok = tcp_opt.saw_tstamp; - req->window_clamp = tp->window_clamp ? 
:dst_metric(dst, RTAX_WINDOW); tcp_select_initial_window(tcp_full_space(sk), req->mss, &req->rcv_wnd, &req->window_clamp, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index aadd7cef73b3..febfd595a40d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -96,7 +96,7 @@ static void tcp_v6_hash(struct sock *sk) return; } local_bh_disable(); - __inet6_hash(sk); + __inet6_hash(sk, NULL); local_bh_enable(); } } @@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) struct inet6_request_sock *treq; struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp = tcp_sk(sk); - struct dst_entry *dst = __sk_dst_get(sk); __u32 isn = TCP_SKB_CB(skb)->when; #ifdef CONFIG_SYN_COOKIES int want_cookie = 0; @@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); tmp_opt.user_mss = tp->rx_opt.user_mss; - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); + tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.cookie_plus > 0 && tmp_opt.saw_tstamp && @@ -1496,7 +1495,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, } #endif - __inet6_hash(newsk); + __inet6_hash(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 7254e3f899a7..dbdc696f5fc5 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -24,7 +24,6 @@ #include <net/mip6.h> #endif -static struct dst_ops xfrm6_dst_ops; static struct xfrm_policy_afinfo xfrm6_policy_afinfo; static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, @@ -224,8 +223,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) static inline int xfrm6_garbage_collect(struct dst_ops *ops) { - xfrm6_policy_afinfo.garbage_collect(&init_net); - return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2); + struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); + + xfrm6_policy_afinfo.garbage_collect(net); + return (atomic_read(&ops->entries) > ops->gc_thresh * 2); } static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) @@ -310,7 +311,7 @@ static void xfrm6_policy_fini(void) static struct ctl_table xfrm6_policy_table[] = { { .procname = "xfrm6_gc_thresh", - .data = &xfrm6_dst_ops.gc_thresh, + .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, @@ -326,13 +327,6 @@ int __init xfrm6_init(void) int ret; unsigned int gc_thresh; - ret = xfrm6_policy_init(); - if (ret) - goto out; - - ret = xfrm6_state_init(); - if (ret) - goto out_policy; /* * We need a good default value for the xfrm6 gc threshold. * In ipv4 we set it to the route hash table size * 8, which @@ -346,6 +340,15 @@ int __init xfrm6_init(void) */ gc_thresh = FIB6_TABLE_HASHSZ * 8; xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 
1024 : gc_thresh; + + ret = xfrm6_policy_init(); + if (ret) + goto out; + + ret = xfrm6_state_init(); + if (ret) + goto out_policy; + #ifdef CONFIG_SYSCTL sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, xfrm6_policy_table); diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h index b001c361ad30..4300df35d37d 100644 --- a/net/irda/irnet/irnet.h +++ b/net/irda/irnet/irnet.h @@ -249,6 +249,7 @@ #include <linux/poll.h> #include <linux/capability.h> #include <linux/ctype.h> /* isspace() */ +#include <linux/string.h> /* skip_spaces() */ #include <asm/uaccess.h> #include <linux/init.h> diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 7dea882dbb75..156020d138b5 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -76,9 +76,8 @@ irnet_ctrl_write(irnet_socket * ap, /* Look at the next command */ start = next; - /* Scrap whitespaces before the command */ - while(isspace(*start)) - start++; + /* Scrap whitespaces before the command */ + start = skip_spaces(start); /* ',' is our command separator */ next = strchr(start, ','); @@ -133,8 +132,7 @@ irnet_ctrl_write(irnet_socket * ap, char * endp; /* Scrap whitespaces before the command */ - while(isspace(*begp)) - begp++; + begp = skip_spaces(begp); /* Convert argument to a number (last arg is the base) */ addr = simple_strtoul(begp, &endp, 16); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 1e428863574f..c18286a2167b 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev) return 0; } -static struct dev_pm_ops afiucv_pm_ops = { +static const struct dev_pm_ops afiucv_pm_ops = { .prepare = afiucv_pm_prepare, .complete = afiucv_pm_complete, .freeze = afiucv_pm_freeze, diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 3b1f5f5f8de7..fd8b28361a64 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *); static int iucv_pm_thaw(struct device *); static int iucv_pm_restore(struct device *); -static struct dev_pm_ops iucv_pm_ops = { +static const struct dev_pm_ops iucv_pm_ops = { .prepare = iucv_pm_prepare, .complete = iucv_pm_complete, .freeze = iucv_pm_freeze, diff --git a/net/key/af_key.c b/net/key/af_key.c index 84209fbbeb17..76fa6fef6473 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1193,6 +1193,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, x->aalg->alg_key_len = key->sadb_key_bits; memcpy(x->aalg->alg_key, key+1, keysize); } + x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits; x->props.aalgo = sa->sadb_sa_auth; /* x->algo.flags = sa->sadb_sa_flags; */ } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 93ee1fd5c08d..9ae1a4760b58 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -354,7 +354,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->rx_packets = sta->rx_packets; sinfo->tx_packets = sta->tx_packets; - if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { + if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) || + (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) { sinfo->filled |= STATION_INFO_SIGNAL; sinfo->signal = (s8)sta->last_signal; } @@ -1330,6 +1331,9 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_conf *conf = &local->hw.conf; + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + if 
(!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) return -EOPNOTSUPP; diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index ee94ea0c67e9..da8497ef7063 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h @@ -680,7 +680,7 @@ TRACE_EVENT(drv_ampdu_action, __entry->ret = ret; __entry->action = action; __entry->tid = tid; - __entry->ssn = *ssn; + __entry->ssn = ssn ? *ssn : 0; ), TP_printk( diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 3787455fb696..d7dcee680728 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -34,9 +34,28 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, ht_cap->ht_supported = true; - ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & sband->ht_cap.cap; - ht_cap->cap &= ~IEEE80211_HT_CAP_SM_PS; - ht_cap->cap |= sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS; + /* + * The bits listed in this expression should be + * the same for the peer and us, if the station + * advertises more then we can't use those thus + * we mask them out. + */ + ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & + (sband->ht_cap.cap | + ~(IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_DSSSCCK40)); + /* + * The STBC bits are asymmetric -- if we don't have + * TX then mask out the peer's RX and vice versa. + */ + if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)) + ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC; + if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)) + ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC; ampdu_info = ht_cap_ie->ampdu_params_info; ht_cap->ampdu_factor = diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 10d13856f86c..1f2db647bb5c 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -382,6 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, u8 *bssid,u8 *addr, u32 supp_rates) { + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct sta_info *sta; int band = local->hw.conf.channel->band; @@ -397,6 +398,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, return NULL; } + if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) + return NULL; + if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) return NULL; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 419f186cfcf0..91dc8636d644 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -746,6 +746,7 @@ struct ieee80211_local { unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ bool pspolling; + bool scan_ps_enabled; /* * PS can only be enabled when we have exactly one managed * interface (and monitors) in PS, this then points there. 
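For reference, a minimal, self-contained C sketch of the capability-intersection rule introduced in the net/mac80211/ht.c hunk above: symmetric HT capability bits are kept only when both the peer and the local hardware advertise them, while the asymmetric STBC bits are cleared according to the opposite direction's support. This is illustrative userspace code, not part of the patch, and the IEEE80211_HT_CAP_* values are assumed to mirror include/linux/ieee80211.h.

#include <stdint.h>
#include <stdio.h>

/* Values assumed to mirror include/linux/ieee80211.h. */
#define IEEE80211_HT_CAP_LDPC_CODING     0x0001
#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
#define IEEE80211_HT_CAP_GRN_FLD         0x0010
#define IEEE80211_HT_CAP_SGI_20          0x0020
#define IEEE80211_HT_CAP_SGI_40          0x0040
#define IEEE80211_HT_CAP_TX_STBC         0x0080
#define IEEE80211_HT_CAP_RX_STBC         0x0300
#define IEEE80211_HT_CAP_DSSSCCK40       0x1000

static uint16_t effective_ht_cap(uint16_t peer_cap, uint16_t own_cap)
{
	/* Bits that must be supported by both sides to be usable. */
	const uint16_t symmetric = IEEE80211_HT_CAP_LDPC_CODING |
				   IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
				   IEEE80211_HT_CAP_GRN_FLD |
				   IEEE80211_HT_CAP_SGI_20 |
				   IEEE80211_HT_CAP_SGI_40 |
				   IEEE80211_HT_CAP_DSSSCCK40;
	uint16_t cap = peer_cap & (own_cap | ~symmetric);

	/* STBC is asymmetric: the peer's RX needs our TX, and vice versa. */
	if (!(own_cap & IEEE80211_HT_CAP_TX_STBC))
		cap &= ~IEEE80211_HT_CAP_RX_STBC;
	if (!(own_cap & IEEE80211_HT_CAP_RX_STBC))
		cap &= ~IEEE80211_HT_CAP_TX_STBC;

	return cap;
}

int main(void)
{
	/* Peer offers SGI-20 and RX STBC; we only support SGI-40. */
	uint16_t cap = effective_ht_cap(IEEE80211_HT_CAP_SGI_20 |
					IEEE80211_HT_CAP_RX_STBC,
					IEEE80211_HT_CAP_SGI_40);

	printf("effective HT cap: 0x%04x\n", cap); /* prints 0x0000 */
	return 0;
}

In this sample both of the peer's advertised bits are masked out: SGI-20 because the local side does not support it, and RX STBC because the local side cannot transmit STBC.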
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 80c16f6e2af6..32abae3ce32a 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -15,12 +15,14 @@ #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/mac80211.h> +#include <net/ieee80211_radiotap.h> #include "ieee80211_i.h" #include "sta_info.h" #include "debugfs_netdev.h" #include "mesh.h" #include "led.h" #include "driver-ops.h" +#include "wme.h" /** * DOC: Interface list locking @@ -314,7 +316,7 @@ static int ieee80211_open(struct net_device *dev) if (sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); - netif_start_queue(dev); + netif_tx_start_all_queues(dev); return 0; err_del_interface: @@ -343,7 +345,7 @@ static int ieee80211_stop(struct net_device *dev) /* * Stop TX on this interface first. */ - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); /* * Now delete all active aggregation sessions. @@ -644,6 +646,12 @@ static void ieee80211_teardown_sdata(struct net_device *dev) WARN_ON(flushed); } +static u16 ieee80211_netdev_select_queue(struct net_device *dev, + struct sk_buff *skb) +{ + return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); +} + static const struct net_device_ops ieee80211_dataif_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, @@ -652,8 +660,38 @@ static const struct net_device_ops ieee80211_dataif_ops = { .ndo_set_multicast_list = ieee80211_set_multicast_list, .ndo_change_mtu = ieee80211_change_mtu, .ndo_set_mac_address = eth_mac_addr, + .ndo_select_queue = ieee80211_netdev_select_queue, }; +static u16 ieee80211_monitor_select_queue(struct net_device *dev, + struct sk_buff *skb) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_hdr *hdr; + struct ieee80211_radiotap_header *rtap = (void *)skb->data; + u8 *p; + + if (local->hw.queues < 4) + return 0; + + if (skb->len < 4 || + skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) + return 0; /* doesn't matter, frame will be dropped */ + + hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); + + if (!ieee80211_is_data_qos(hdr->frame_control)) { + skb->priority = 7; + return ieee802_1d_to_ac[skb->priority]; + } + + p = ieee80211_get_qos_ctl(hdr); + skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; + + return ieee80211_downgrade_queue(local, skb); +} + static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, @@ -662,6 +700,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_set_multicast_list = ieee80211_set_multicast_list, .ndo_change_mtu = ieee80211_change_mtu, .ndo_set_mac_address = eth_mac_addr, + .ndo_select_queue = ieee80211_monitor_select_queue, }; static void ieee80211_if_setup(struct net_device *dev) @@ -768,8 +807,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, ASSERT_RTNL(); - ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, - name, ieee80211_if_setup); + ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size, + name, ieee80211_if_setup, local->hw.queues); if (!ndev) return -ENOMEM; dev_net_set(ndev, wiphy_net(local->hw.wiphy)); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 8116d1a96a4a..0d2d94881f1f 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -515,6 +515,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) * and we need some headroom for passing the frame to monitor * 
interfaces, but never both at the same time. */ + BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM != + sizeof(struct ieee80211_tx_status_rtap_hdr)); local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, sizeof(struct ieee80211_tx_status_rtap_hdr)); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index c0fe46493f71..6a4331429598 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -427,7 +427,7 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, char *addr5, char *addr6) { int aelen = 0; - memset(meshhdr, 0, sizeof(meshhdr)); + memset(meshhdr, 0, sizeof(*meshhdr)); meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); sdata->u.mesh.mesh_seqnum++; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 31e102541869..85562c59d7d6 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -188,8 +188,9 @@ struct mesh_rmc { */ #define MESH_PREQ_MIN_INT 10 #define MESH_DIAM_TRAVERSAL_TIME 50 -/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their - * expiration +/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before + * timing out. This way it will remain ACTIVE and no data frames will be + * unnecesarily held in the pending queue. */ #define MESH_PATH_REFRESH_TIME 1000 #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 833b2f3670c5..d28acb6b1f81 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -937,7 +937,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, if (mpath->flags & MESH_PATH_ACTIVE) { if (time_after(jiffies, - mpath->exp_time + + mpath->exp_time - msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) && !(mpath->flags & MESH_PATH_RESOLVING) && diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 6dc7b5ad9a41..05a18f43e1bf 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -915,6 +915,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | IEEE80211_STA_BEACON_POLL); + /* + * Always handle WMM once after association regardless + * of the first value the AP uses. Setting -1 here has + * that effect because the AP values is an unsigned + * 4-bit value. + */ + sdata->u.mgd.wmm_last_param_set = -1; + ieee80211_led_assoc(local, 1); sdata->vif.bss_conf.assoc = 1; @@ -934,7 +942,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, ieee80211_recalc_ps(local, -1); mutex_unlock(&local->iflist_mtx); - netif_start_queue(sdata->dev); + netif_tx_start_all_queues(sdata->dev); netif_carrier_on(sdata->dev); } @@ -1066,7 +1074,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, * time -- we don't want the scan code to enable queues. 
*/ - netif_stop_queue(sdata->dev); + netif_tx_stop_all_queues(sdata->dev); netif_carrier_off(sdata->dev); rcu_read_lock(); @@ -1083,8 +1091,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_set_wmm_default(sdata); - ieee80211_recalc_idle(local); - /* channel(_type) changes are handled by ieee80211_hw_config */ local->oper_channel_type = NL80211_CHAN_NO_HT; @@ -1370,6 +1376,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, if (!wk) { ieee80211_set_disassoc(sdata, true); + ieee80211_recalc_idle(sdata->local); } else { list_del(&wk->list); kfree(wk); @@ -1403,6 +1410,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, sdata->dev->name, mgmt->sa, reason_code); ieee80211_set_disassoc(sdata, false); + ieee80211_recalc_idle(sdata->local); return RX_MGMT_CFG80211_DISASSOC; } @@ -1955,7 +1963,9 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: - /* XXX: differentiate, can only happen for CSA now! */ + if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) + break; + ieee80211_sta_process_chanswitch(sdata, &mgmt->u.action.u.chan_switch.sw_elem, ifmgd->associated); @@ -2117,6 +2127,7 @@ static void ieee80211_sta_work(struct work_struct *work) " after %dms, disconnecting.\n", bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); ieee80211_set_disassoc(sdata, true); + ieee80211_recalc_idle(local); mutex_unlock(&ifmgd->mtx); /* * must be outside lock due to cfg80211, @@ -2560,6 +2571,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, IEEE80211_STYPE_DEAUTH, req->reason_code, cookie); + ieee80211_recalc_idle(sdata->local); + return 0; } @@ -2592,5 +2605,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, IEEE80211_STYPE_DISASSOC, req->reason_code, cookie); + + ieee80211_recalc_idle(sdata->local); + return 0; } diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index 699d3ed869c4..29bc4c516238 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c @@ -190,7 +190,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, rate_control_pid_normalize(pinfo, sband->n_bitrates); /* Compute the proportional, integral and derivative errors. 
*/ - err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; + err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT; err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift; spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index f237df408378..82a30c1bf3ab 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1712,7 +1712,6 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) mpp_path_add(proxied_addr, mpp_addr, sdata); } else { spin_lock_bh(&mppath->state_lock); - mppath->exp_time = jiffies; if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) memcpy(mppath->mpp, mpp_addr, ETH_ALEN); spin_unlock_bh(&mppath->state_lock); @@ -1747,7 +1746,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) memset(info, 0, sizeof(*info)); info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; info->control.vif = &rx->sdata->vif; - ieee80211_select_queue(local, fwd_skb); + skb_set_queue_mapping(skb, + ieee80211_select_queue(rx->sdata, fwd_skb)); + ieee80211_set_qos_hdr(local, skb); if (is_multicast_ether_addr(fwd_hdr->addr1)) IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, fwded_mcast); @@ -2014,6 +2015,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) } break; default: + /* do not process rejected action frames */ + if (mgmt->u.action.category & 0x80) + return RX_DROP_MONITOR; + return RX_CONTINUE; } diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 4cf387c944bf..f934c9620b73 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -227,7 +227,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - bool ps = false; + + local->scan_ps_enabled = false; /* FIXME: what to do when local->pspolling is true? */ @@ -235,12 +236,13 @@ static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) cancel_work_sync(&local->dynamic_ps_enable_work); if (local->hw.conf.flags & IEEE80211_CONF_PS) { - ps = true; + local->scan_ps_enabled = true; local->hw.conf.flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } - if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) + if (!(local->scan_ps_enabled) || + !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) /* * If power save was enabled, no need to send a nullfunc * frame because AP knows that we are sleeping. But if the @@ -261,7 +263,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) if (!local->ps_sdata) ieee80211_send_nullfunc(local, sdata, 0); - else { + else if (local->scan_ps_enabled) { /* * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware * will send a nullfunc frame with the powersave bit set @@ -277,6 +279,16 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) */ local->hw.conf.flags |= IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } else if (local->hw.conf.dynamic_ps_timeout > 0) { + /* + * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer + * had been running before leaving the operating channel, + * restart the timer now and send a nullfunc frame to inform + * the AP that we are awake. 
+ */ + ieee80211_send_nullfunc(local, sdata, 0); + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); } } @@ -341,10 +353,10 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) if (sdata->vif.type == NL80211_IFTYPE_STATION) { if (sdata->u.mgd.associated) { ieee80211_scan_ps_disable(sdata); - netif_wake_queue(sdata->dev); + netif_tx_wake_all_queues(sdata->dev); } } else - netif_wake_queue(sdata->dev); + netif_tx_wake_all_queues(sdata->dev); /* re-enable beaconing */ if (sdata->vif.type == NL80211_IFTYPE_AP || @@ -399,7 +411,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) * are handled in the scan state machine */ if (sdata->vif.type != NL80211_IFTYPE_STATION) - netif_stop_queue(sdata->dev); + netif_tx_stop_all_queues(sdata->dev); } mutex_unlock(&local->iflist_mtx); @@ -563,7 +575,7 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca continue; if (sdata->vif.type == NL80211_IFTYPE_STATION) { - netif_stop_queue(sdata->dev); + netif_tx_stop_all_queues(sdata->dev); if (sdata->u.mgd.associated) ieee80211_scan_ps_enable(sdata); } @@ -598,7 +610,7 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca if (sdata->vif.type == NL80211_IFTYPE_STATION) { if (sdata->u.mgd.associated) ieee80211_scan_ps_disable(sdata); - netif_wake_queue(sdata->dev); + netif_tx_wake_all_queues(sdata->dev); } } mutex_unlock(&local->iflist_mtx); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8834cc93c716..ac210b586702 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1419,6 +1419,10 @@ static bool need_dynamic_ps(struct ieee80211_local *local) if (!local->ps_sdata) return false; + /* No point if we're going to suspend */ + if (local->quiescing) + return false; + return true; } @@ -1508,7 +1512,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, return; } - ieee80211_select_queue(local, skb); + ieee80211_set_qos_hdr(local, skb); ieee80211_tx(sdata, skb, false); rcu_read_unlock(); } @@ -2287,6 +2291,9 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) skb_set_network_header(skb, 0); skb_set_transport_header(skb, 0); + /* send all internal mgmt frames on VO */ + skb_set_queue_mapping(skb, 0); + /* * The other path calling ieee80211_xmit is from the tasklet, * and while we can handle concurrent transmissions locking diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d09f78bb2442..3848140313f5 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -269,6 +269,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; if (WARN_ON(queue >= hw->queues)) return; @@ -281,6 +282,11 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, if (!skb_queue_empty(&local->pending[queue])) tasklet_schedule(&local->tx_pending_tasklet); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) + netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); + rcu_read_unlock(); } void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, @@ -305,11 +311,17 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; if (WARN_ON(queue >= hw->queues)) return; __set_bit(reason, 
&local->queue_stop_reasons[queue]); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) + netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue)); + rcu_read_unlock(); } void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, @@ -579,7 +591,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, if (elen > left) break; - if (calc_crc && id < 64 && (filter & BIT(id))) + if (calc_crc && id < 64 && (filter & (1ULL << id))) crc = crc32_be(crc, pos - 2, elen + 2); switch (id) { @@ -1039,7 +1051,19 @@ int ieee80211_reconfig(struct ieee80211_local *local) /* restart hardware */ if (local->open_count) { + /* + * Upon resume hardware can sometimes be goofy due to + * various platform / driver / bus issues, so restarting + * the device may at times not work immediately. Propagate + * the error. + */ res = drv_start(local); + if (res) { + WARN(local->suspended, "Harware became unavailable " + "upon resume. This is could be a software issue" + "prior to suspend or a harware issue\n"); + return res; + } ieee80211_led_radio(local, true); } diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index b19b7696f3a2..79d887dae738 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -44,22 +44,69 @@ static int wme_downgrade_ac(struct sk_buff *skb) } -/* Indicate which queue to use. */ -static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb) +/* Indicate which queue to use. */ +u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta = NULL; + u32 sta_flags = 0; + const u8 *ra = NULL; + bool qos = false; - if (!ieee80211_is_data(hdr->frame_control)) { - /* management frames go on AC_VO queue, but are sent - * without QoS control fields */ - return 0; + if (local->hw.queues < 4 || skb->len < 6) { + skb->priority = 0; /* required for correct WPA/11i MIC */ + return min_t(u16, local->hw.queues - 1, + ieee802_1d_to_ac[skb->priority]); + } + + rcu_read_lock(); + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + rcu_read_lock(); + sta = rcu_dereference(sdata->u.vlan.sta); + if (sta) + sta_flags = get_sta_flags(sta); + rcu_read_unlock(); + if (sta) + break; + case NL80211_IFTYPE_AP: + ra = skb->data; + break; + case NL80211_IFTYPE_WDS: + ra = sdata->u.wds.remote_addr; + break; +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: + /* + * XXX: This is clearly broken ... but already was before, + * because ieee80211_fill_mesh_addresses() would clear A1 + * except for multicast addresses. 
+ */ + break; +#endif + case NL80211_IFTYPE_STATION: + ra = sdata->u.mgd.bssid; + break; + case NL80211_IFTYPE_ADHOC: + ra = skb->data; + break; + default: + break; } - if (0 /* injected */) { - /* use AC from radiotap */ + if (!sta && ra && !is_multicast_ether_addr(ra)) { + sta = sta_info_get(local, ra); + if (sta) + sta_flags = get_sta_flags(sta); } - if (!ieee80211_is_data_qos(hdr->frame_control)) { + if (sta_flags & WLAN_STA_WME) + qos = true; + + rcu_read_unlock(); + + if (!qos) { skb->priority = 0; /* required for correct WPA/11i MIC */ return ieee802_1d_to_ac[skb->priority]; } @@ -68,6 +115,12 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb) * data frame has */ skb->priority = cfg80211_classify8021d(skb); + return ieee80211_downgrade_queue(local, skb); +} + +u16 ieee80211_downgrade_queue(struct ieee80211_local *local, + struct sk_buff *skb) +{ /* in case we are a client verify acm is not set for this ac */ while (unlikely(local->wmm_acm & BIT(skb->priority))) { if (wme_downgrade_ac(skb)) { @@ -85,24 +138,17 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb) return ieee802_1d_to_ac[skb->priority]; } -void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb) +void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - u16 queue; - u8 tid; - - queue = classify80211(local, skb); - if (unlikely(queue >= local->hw.queues)) - queue = local->hw.queues - 1; - - /* - * Now we know the 1d priority, fill in the QoS header if - * there is one (and we haven't done this before). - */ + struct ieee80211_hdr *hdr = (void *)skb->data; + + /* Fill in the QoS header if there is one. */ if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *p = ieee80211_get_qos_ctl(hdr); - u8 ack_policy = 0; + u8 ack_policy = 0, tid; + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + if (unlikely(local->wifi_wme_noack_test)) ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << QOS_CONTROL_ACK_POLICY_SHIFT; @@ -110,6 +156,4 @@ void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb) *p++ = ack_policy | tid; *p = 0; } - - skb_set_queue_mapping(skb, queue); } diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index d4fd87ca5118..6053b1c9feee 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h @@ -20,7 +20,11 @@ extern const int ieee802_1d_to_ac[8]; -void ieee80211_select_queue(struct ieee80211_local *local, - struct sk_buff *skb); +u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb); +u16 ieee80211_downgrade_queue(struct ieee80211_local *local, + struct sk_buff *skb); + #endif /* _WME_H */ diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index eb0ceb846527..fc70a49c0afd 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -482,8 +482,7 @@ static ssize_t recent_old_proc_write(struct file *file, if (copy_from_user(buf, input, size)) return -EFAULT; - while (isspace(*c)) - c++; + c = skip_spaces(c); if (size - (c - buf) < 5) return c - buf; diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index aacba76070fc..e2e2d33cafdf 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -843,12 +843,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) dptr = skb_push(skb, 1); *dptr = AX25_P_NETROM; - ax25s = ax25_send_frame(skb, 256, (ax25_address 
*)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); - if (nr_neigh->ax25 && ax25s) { - /* We were already holding this ax25_cb */ + ax25s = nr_neigh->ax25; + nr_neigh->ax25 = ax25_send_frame(skb, 256, + (ax25_address *)dev->dev_addr, + &nr_neigh->callsign, + nr_neigh->digipeat, nr_neigh->dev); + if (ax25s) ax25_cb_put(ax25s); - } - nr_neigh->ax25 = ax25s; dev_put(dev); ret = (nr_neigh->ax25 != NULL); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 020562164b56..f126d18dbdc4 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -415,7 +415,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, { struct sock *sk = sock->sk; struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name; - struct sk_buff *skb; + struct sk_buff *skb = NULL; struct net_device *dev; __be16 proto = 0; int err; @@ -437,6 +437,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, */ saddr->spkt_device[13] = 0; +retry: rcu_read_lock(); dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); err = -ENODEV; @@ -456,58 +457,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, if (len > dev->mtu + dev->hard_header_len) goto out_unlock; - err = -ENOBUFS; - skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL); - - /* - * If the write buffer is full, then tough. At this level the user - * gets to deal with the problem - do your own algorithmic backoffs. - * That's far more flexible. - */ - - if (skb == NULL) - goto out_unlock; - - /* - * Fill it in - */ - - /* FIXME: Save some space for broken drivers that write a - * hard header at transmission time by themselves. PPP is the - * notable one here. This should really be fixed at the driver level. - */ - skb_reserve(skb, LL_RESERVED_SPACE(dev)); - skb_reset_network_header(skb); - - /* Try to align data part correctly */ - if (dev->header_ops) { - skb->data -= dev->hard_header_len; - skb->tail -= dev->hard_header_len; - if (len < dev->hard_header_len) - skb_reset_network_header(skb); + if (!skb) { + size_t reserved = LL_RESERVED_SPACE(dev); + unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; + + rcu_read_unlock(); + skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL); + if (skb == NULL) + return -ENOBUFS; + /* FIXME: Save some space for broken drivers that write a hard + * header at transmission time by themselves. PPP is the notable + * one here. This should really be fixed at the driver level. 
+ */ + skb_reserve(skb, reserved); + skb_reset_network_header(skb); + + /* Try to align data part correctly */ + if (hhlen) { + skb->data -= hhlen; + skb->tail -= hhlen; + if (len < hhlen) + skb_reset_network_header(skb); + } + err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + if (err) + goto out_free; + goto retry; } - /* Returns -EFAULT on error */ - err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - if (err) - goto out_free; - - /* - * Now send it - */ dev_queue_xmit(skb); rcu_read_unlock(); return len; -out_free: - kfree_skb(skb); out_unlock: rcu_read_unlock(); +out_free: + kfree_skb(skb); return err; } @@ -1030,8 +1021,20 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) status = TP_STATUS_SEND_REQUEST; err = dev_queue_xmit(skb); - if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0)) - goto out_xmit; + if (unlikely(err > 0)) { + err = net_xmit_errno(err); + if (err && __packet_get_status(po, ph) == + TP_STATUS_AVAILABLE) { + /* skb was destructed already */ + skb = NULL; + goto out_status; + } + /* + * skb was dropped but not destructed yet; + * let's treat it like congestion or err < 0 + */ + err = 0; + } packet_increment_head(&po->tx_ring); len_sum += tp_len; } while (likely((ph != NULL) || @@ -1042,9 +1045,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) err = len_sum; goto out_put; -out_xmit: - skb->destructor = sock_wfree; - atomic_dec(&po->tx_ring.pending); out_status: __packet_set_status(po, ph, status); kfree_skb(skb); diff --git a/net/rds/ib.c b/net/rds/ib.c index 536ebe5d3f6b..3b8992361042 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -182,8 +182,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, ic = conn->c_transport_data; dev_addr = &ic->i_cm_id->route.addr.dev_addr; - ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); - ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); + rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); + rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); iinfo->max_send_wr = ic->i_send_ring.w_nr; diff --git a/net/rds/iw.c b/net/rds/iw.c index db224f7c2937..b28fa8525b24 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c @@ -184,8 +184,8 @@ static int rds_iw_conn_info_visitor(struct rds_connection *conn, ic = conn->c_transport_data; dev_addr = &ic->i_cm_id->route.addr.dev_addr; - ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); - ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); + rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); + rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); iinfo->max_send_wr = ic->i_send_ring.w_nr; diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 448e5a0fcc2e..c218e07e5caf 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -579,6 +579,8 @@ static ssize_t rfkill_name_show(struct device *dev, static const char *rfkill_get_type_str(enum rfkill_type type) { + BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1); + switch (type) { case RFKILL_TYPE_WLAN: return "wlan"; @@ -597,8 +599,6 @@ static const char *rfkill_get_type_str(enum rfkill_type type) default: BUG(); } - - BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1); } static ssize_t rfkill_type_show(struct device *dev, diff --git 
a/net/rose/rose_link.c b/net/rose/rose_link.c index bd86a63960ce..5ef5f6988a2e 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c @@ -101,13 +101,17 @@ static void rose_t0timer_expiry(unsigned long param) static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) { ax25_address *rose_call; + ax25_cb *ax25s; if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) rose_call = (ax25_address *)neigh->dev->dev_addr; else rose_call = &rose_callsign; + ax25s = neigh->ax25; neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); + if (ax25s) + ax25_cb_put(ax25s); return (neigh->ax25 != NULL); } @@ -120,13 +124,17 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) static int rose_link_up(struct rose_neigh *neigh) { ax25_address *rose_call; + ax25_cb *ax25s; if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) rose_call = (ax25_address *)neigh->dev->dev_addr; else rose_call = &rose_callsign; + ax25s = neigh->ax25; neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); + if (ax25s) + ax25_cb_put(ax25s); return (neigh->ax25 != NULL); } diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 114df6eec8c3..968e8bac1b5d 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c @@ -75,7 +75,7 @@ static void rose_loopback_timer(unsigned long param) lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); frametype = skb->data[2]; dest = (rose_address *)(skb->data + 4); - lci_o = 0xFFF - lci_i; + lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; skb_reset_transport_header(skb); diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 795c4b025e31..70a0b3b4b4d2 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -235,6 +235,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) if ((s = rose_neigh_list) == rose_neigh) { rose_neigh_list = rose_neigh->next; + if (rose_neigh->ax25) + ax25_cb_put(rose_neigh->ax25); kfree(rose_neigh->digipeat); kfree(rose_neigh); return; @@ -243,6 +245,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) while (s != NULL && s->next != NULL) { if (s->next == rose_neigh) { s->next = rose_neigh->next; + if (rose_neigh->ax25) + ax25_cb_put(rose_neigh->ax25); kfree(rose_neigh->digipeat); kfree(rose_neigh); return; @@ -812,6 +816,7 @@ void rose_link_failed(ax25_cb *ax25, int reason) if (rose_neigh != NULL) { rose_neigh->ax25 = NULL; + ax25_cb_put(ax25); rose_del_route_by_neigh(rose_neigh); rose_kill_by_neigh(rose_neigh); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 89ab66e54740..67fdac9d2d33 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2087,8 +2087,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, if (copy_from_user(&sp->autoclose, optval, optlen)) return -EFAULT; /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */ - if (sp->autoclose > (MAX_SCHEDULE_TIMEOUT / HZ) ) - sp->autoclose = (__u32)(MAX_SCHEDULE_TIMEOUT / HZ) ; + sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ); return 0; } diff --git a/net/socket.c b/net/socket.c index b94c3dd71015..769c386bd428 100644 --- a/net/socket.c +++ b/net/socket.c @@ -312,18 +312,6 @@ static struct file_system_type sock_fs_type = { .kill_sb = kill_anon_super, }; -static int sockfs_delete_dentry(struct dentry *dentry) -{ - /* - * At creation time, we pretended this dentry was hashed - * (by clearing DCACHE_UNHASHED bit in d_flags) - * At delete time, we restore the truth : not 
hashed. - * (so that dput() can proceed correctly) - */ - dentry->d_flags |= DCACHE_UNHASHED; - return 0; -} - /* * sockfs_dname() is called from d_path(). */ @@ -334,7 +322,6 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) } static const struct dentry_operations sockfs_dentry_operations = { - .d_delete = sockfs_delete_dentry, .d_dname = sockfs_dname, }; @@ -355,68 +342,55 @@ static const struct dentry_operations sockfs_dentry_operations = { * but we take care of internal coherence yet. */ -static int sock_alloc_fd(struct file **filep, int flags) +static int sock_alloc_file(struct socket *sock, struct file **f, int flags) { + struct qstr name = { .name = "" }; + struct path path; + struct file *file; int fd; fd = get_unused_fd_flags(flags); - if (likely(fd >= 0)) { - struct file *file = get_empty_filp(); - - *filep = file; - if (unlikely(!file)) { - put_unused_fd(fd); - return -ENFILE; - } - } else - *filep = NULL; - return fd; -} - -static int sock_attach_fd(struct socket *sock, struct file *file, int flags) -{ - struct dentry *dentry; - struct qstr name = { .name = "" }; + if (unlikely(fd < 0)) + return fd; - dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name); - if (unlikely(!dentry)) + path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name); + if (unlikely(!path.dentry)) { + put_unused_fd(fd); return -ENOMEM; + } + path.mnt = mntget(sock_mnt); - dentry->d_op = &sockfs_dentry_operations; - /* - * We dont want to push this dentry into global dentry hash table. - * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED - * This permits a working /proc/$pid/fd/XXX on sockets - */ - dentry->d_flags &= ~DCACHE_UNHASHED; - d_instantiate(dentry, SOCK_INODE(sock)); + path.dentry->d_op = &sockfs_dentry_operations; + d_instantiate(path.dentry, SOCK_INODE(sock)); + SOCK_INODE(sock)->i_fop = &socket_file_ops; - sock->file = file; - init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE, + file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &socket_file_ops); - SOCK_INODE(sock)->i_fop = &socket_file_ops; + if (unlikely(!file)) { + /* drop dentry, keep inode */ + atomic_inc(&path.dentry->d_inode->i_count); + path_put(&path); + put_unused_fd(fd); + return -ENFILE; + } + + sock->file = file; file->f_flags = O_RDWR | (flags & O_NONBLOCK); file->f_pos = 0; file->private_data = sock; - return 0; + *f = file; + return fd; } int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; - int fd = sock_alloc_fd(&newfile, flags); + int fd = sock_alloc_file(sock, &newfile, flags); - if (likely(fd >= 0)) { - int err = sock_attach_fd(sock, newfile, flags); - - if (unlikely(err < 0)) { - put_filp(newfile); - put_unused_fd(fd); - return err; - } + if (likely(fd >= 0)) fd_install(fd, newfile); - } + return fd; } @@ -1390,29 +1364,19 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol, if (err < 0) goto out_release_both; - fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC); + fd1 = sock_alloc_file(sock1, &newfile1, flags); if (unlikely(fd1 < 0)) { err = fd1; goto out_release_both; } - fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC); + fd2 = sock_alloc_file(sock2, &newfile2, flags); if (unlikely(fd2 < 0)) { err = fd2; - put_filp(newfile1); - put_unused_fd(fd1); - goto out_release_both; - } - - err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK); - if (unlikely(err < 0)) { - goto out_fd2; - } - - err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK); - if (unlikely(err < 0)) { fput(newfile1); - goto out_fd1; + put_unused_fd(fd1); + 
sock_release(sock2); + goto out; } audit_fd_pair(fd1, fd2); @@ -1438,16 +1402,6 @@ out_release_1: sock_release(sock1); out: return err; - -out_fd2: - put_filp(newfile1); - sock_release(sock1); -out_fd1: - put_filp(newfile2); - sock_release(sock2); - put_unused_fd(fd1); - put_unused_fd(fd2); - goto out; } /* @@ -1551,17 +1505,13 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, */ __module_get(newsock->ops->owner); - newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC); + newfd = sock_alloc_file(newsock, &newfile, flags); if (unlikely(newfd < 0)) { err = newfd; sock_release(newsock); goto out_put; } - err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK); - if (err < 0) - goto out_fd_simple; - err = security_socket_accept(sock, newsock); if (err) goto out_fd; @@ -1591,11 +1541,6 @@ out_put: fput_light(sock->file, fput_needed); out: return err; -out_fd_simple: - sock_release(newsock); - put_filp(newfile); - put_unused_fd(newfd); - goto out_put; out_fd: fput(newfile); put_unused_fd(newfd); diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index c7450c8f0a7c..6dcdd2517819 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c @@ -55,16 +55,8 @@ static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap, /* * RFC 4291, Section 2.2.1 - * - * To keep the result as short as possible, especially - * since we don't shorthand, we don't want leading zeros - * in each halfword, so avoid %pI6. */ - return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x", - ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]), - ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]), - ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]), - ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7])); + return snprintf(buf, buflen, "%pI6c", addr); } static size_t rpc_ntop6(const struct sockaddr *sap, diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 7535a7bed2fa..f394fc190a49 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -123,16 +123,19 @@ rpcauth_unhash_cred_locked(struct rpc_cred *cred) clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); } -static void +static int rpcauth_unhash_cred(struct rpc_cred *cred) { spinlock_t *cache_lock; + int ret; cache_lock = &cred->cr_auth->au_credcache->lock; spin_lock(cache_lock); - if (atomic_read(&cred->cr_count) == 0) + ret = atomic_read(&cred->cr_count) == 0; + if (ret) rpcauth_unhash_cred_locked(cred); spin_unlock(cache_lock); + return ret; } /* @@ -446,31 +449,35 @@ void put_rpccred(struct rpc_cred *cred) { /* Fast path for unhashed credentials */ - if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) - goto need_lock; - - if (!atomic_dec_and_test(&cred->cr_count)) + if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) { + if (atomic_dec_and_test(&cred->cr_count)) + cred->cr_ops->crdestroy(cred); return; - goto out_destroy; -need_lock: + } + if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) return; if (!list_empty(&cred->cr_lru)) { number_cred_unused--; list_del_init(&cred->cr_lru); } - if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) - rpcauth_unhash_cred(cred); if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { - cred->cr_expire = jiffies; - list_add_tail(&cred->cr_lru, &cred_unused); - number_cred_unused++; - spin_unlock(&rpc_credcache_lock); - return; + if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) { + cred->cr_expire = jiffies; + list_add_tail(&cred->cr_lru, &cred_unused); + number_cred_unused++; + goto out_nodestroy; + } + if (!rpcauth_unhash_cred(cred)) { + /* We were hashed and 
someone looked us up... */ + goto out_nodestroy; + } } spin_unlock(&rpc_credcache_lock); -out_destroy: cred->cr_ops->crdestroy(cred); + return; +out_nodestroy: + spin_unlock(&rpc_credcache_lock); } EXPORT_SYMBOL_GPL(put_rpccred); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index fc6a43ccd950..f7a7f8380e38 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -304,7 +304,7 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid) * to that upcall instead of adding the new upcall. */ static inline struct gss_upcall_msg * -gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) +gss_add_msg(struct gss_upcall_msg *gss_msg) { struct rpc_inode *rpci = gss_msg->inode; struct inode *inode = &rpci->vfs_inode; @@ -445,7 +445,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred); if (IS_ERR(gss_new)) return gss_new; - gss_msg = gss_add_msg(gss_auth, gss_new); + gss_msg = gss_add_msg(gss_new); if (gss_msg == gss_new) { struct inode *inode = &gss_new->inode->vfs_inode; int res = rpc_queue_upcall(inode, &gss_new->msg); @@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task) dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, cred->cr_uid); gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); - if (IS_ERR(gss_msg) == -EAGAIN) { + if (PTR_ERR(gss_msg) == -EAGAIN) { /* XXX: warning on the first, under the assumption we * shouldn't normally hit this case on a refresh. */ warn_gssd(); @@ -644,7 +644,22 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); if (IS_ERR(p)) { err = PTR_ERR(p); - gss_msg->msg.errno = (err == -EAGAIN) ? 
-EAGAIN : -EACCES; + switch (err) { + case -EACCES: + gss_msg->msg.errno = err; + err = mlen; + break; + case -EFAULT: + case -ENOMEM: + case -EINVAL: + case -ENOSYS: + gss_msg->msg.errno = -EAGAIN; + break; + default: + printk(KERN_CRIT "%s: bad return from " + "gss_fill_context: %zd\n", __func__, err); + BUG(); + } goto err_release_msg; } gss_msg->ctx = gss_get_ctx(ctx); diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index ef45eba22485..2deb0ed72ff4 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -131,8 +131,10 @@ gss_import_sec_context_kerberos(const void *p, struct krb5_ctx *ctx; int tmp; - if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) + if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) { + p = ERR_PTR(-ENOMEM); goto out_err; + } p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); if (IS_ERR(p)) diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 6efbb0cd3c7c..76e4c6f4ac3c 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize, struct gss_ctx **ctx_id) { if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) - return GSS_S_FAILURE; + return -ENOMEM; (*ctx_id)->mech_type = gss_mech_get(mech); return mech->gm_ops diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 38829e20500b..154034b675bd 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -79,7 +79,7 @@ static void call_connect_status(struct rpc_task *task); static __be32 *rpc_encode_header(struct rpc_task *task); static __be32 *rpc_verify_header(struct rpc_task *task); -static int rpc_ping(struct rpc_clnt *clnt, int flags); +static int rpc_ping(struct rpc_clnt *clnt); static void rpc_register_client(struct rpc_clnt *clnt) { @@ -340,7 +340,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) return clnt; if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { - int err = rpc_ping(clnt, RPC_TASK_SOFT); + int err = rpc_ping(clnt); if (err != 0) { rpc_shutdown_client(clnt); return ERR_PTR(err); @@ -528,7 +528,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, clnt->cl_prog = program->number; clnt->cl_vers = version->number; clnt->cl_stats = program->stats; - err = rpc_ping(clnt, RPC_TASK_SOFT); + err = rpc_ping(clnt); if (err != 0) { rpc_shutdown_client(clnt); clnt = ERR_PTR(err); @@ -1060,7 +1060,7 @@ call_bind_status(struct rpc_task *task) goto retry_timeout; case -EPFNOSUPPORT: /* server doesn't support any rpcbind version we know of */ - dprintk("RPC: %5u remote rpcbind service unavailable\n", + dprintk("RPC: %5u unrecognized remote rpcbind service\n", task->tk_pid); break; case -EPROTONOSUPPORT: @@ -1069,6 +1069,21 @@ call_bind_status(struct rpc_task *task) task->tk_status = 0; task->tk_action = call_bind; return; + case -ECONNREFUSED: /* connection problems */ + case -ECONNRESET: + case -ENOTCONN: + case -EHOSTDOWN: + case -EHOSTUNREACH: + case -ENETUNREACH: + case -EPIPE: + dprintk("RPC: %5u remote rpcbind unreachable: %d\n", + task->tk_pid, task->tk_status); + if (!RPC_IS_SOFTCONN(task)) { + rpc_delay(task, 5*HZ); + goto retry_timeout; + } + status = task->tk_status; + break; default: dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", task->tk_pid, -task->tk_status); @@ -1180,11 +1195,25 @@ static void call_transmit_status(struct rpc_task *task) { task->tk_action = call_status; + + /* + * Common case: success. 
Force the compiler to put this + * test first. + */ + if (task->tk_status == 0) { + xprt_end_transmit(task); + rpc_task_force_reencode(task); + return; + } + switch (task->tk_status) { case -EAGAIN: break; default: + dprint_status(task); xprt_end_transmit(task); + rpc_task_force_reencode(task); + break; /* * Special cases: if we've been waiting on the * socket's write_space() callback, or if the @@ -1192,11 +1221,16 @@ call_transmit_status(struct rpc_task *task) * then hold onto the transport lock. */ case -ECONNREFUSED: - case -ECONNRESET: - case -ENOTCONN: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: + if (RPC_IS_SOFTCONN(task)) { + xprt_end_transmit(task); + rpc_exit(task, task->tk_status); + break; + } + case -ECONNRESET: + case -ENOTCONN: case -EPIPE: rpc_task_force_reencode(task); } @@ -1346,6 +1380,10 @@ call_timeout(struct rpc_task *task) dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); task->tk_timeouts++; + if (RPC_IS_SOFTCONN(task)) { + rpc_exit(task, -ETIMEDOUT); + return; + } if (RPC_IS_SOFT(task)) { if (clnt->cl_chatty) printk(KERN_NOTICE "%s: server %s not responding, timed out\n", @@ -1675,14 +1713,14 @@ static struct rpc_procinfo rpcproc_null = { .p_decode = rpcproc_decode_null, }; -static int rpc_ping(struct rpc_clnt *clnt, int flags) +static int rpc_ping(struct rpc_clnt *clnt) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, }; int err; msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); - err = rpc_call_sync(clnt, &msg, flags); + err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN); put_rpccred(msg.rpc_cred); return err; } diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 830faf4d9997..3e3772d8eb92 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -20,6 +20,7 @@ #include <linux/in6.h> #include <linux/kernel.h> #include <linux/errno.h> +#include <linux/mutex.h> #include <net/ipv6.h> #include <linux/sunrpc/clnt.h> @@ -110,6 +111,9 @@ static void rpcb_getport_done(struct rpc_task *, void *); static void rpcb_map_release(void *data); static struct rpc_program rpcb_program; +static struct rpc_clnt * rpcb_local_clnt; +static struct rpc_clnt * rpcb_local_clnt4; + struct rpcbind_args { struct rpc_xprt * r_xprt; @@ -163,21 +167,60 @@ static const struct sockaddr_in rpcb_inaddr_loopback = { .sin_port = htons(RPCBIND_PORT), }; -static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr, - size_t addrlen, u32 version) +static DEFINE_MUTEX(rpcb_create_local_mutex); + +/* + * Returns zero on success, otherwise a negative errno value + * is returned. + */ +static int rpcb_create_local(void) { struct rpc_create_args args = { - .protocol = XPRT_TRANSPORT_UDP, - .address = addr, - .addrsize = addrlen, + .protocol = XPRT_TRANSPORT_TCP, + .address = (struct sockaddr *)&rpcb_inaddr_loopback, + .addrsize = sizeof(rpcb_inaddr_loopback), .servername = "localhost", .program = &rpcb_program, - .version = version, + .version = RPCBVERS_2, .authflavor = RPC_AUTH_UNIX, .flags = RPC_CLNT_CREATE_NOPING, }; + struct rpc_clnt *clnt, *clnt4; + int result = 0; + + if (rpcb_local_clnt) + return result; + + mutex_lock(&rpcb_create_local_mutex); + if (rpcb_local_clnt) + goto out; + + clnt = rpc_create(&args); + if (IS_ERR(clnt)) { + dprintk("RPC: failed to create local rpcbind " + "client (errno %ld).\n", PTR_ERR(clnt)); + result = -PTR_ERR(clnt); + goto out; + } - return rpc_create(&args); + /* + * This results in an RPC ping. On systems running portmapper, + * the v4 ping will fail. 
Proceed anyway, but disallow rpcb + * v4 upcalls. + */ + clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4); + if (IS_ERR(clnt4)) { + dprintk("RPC: failed to create local rpcbind v4 " + "cleint (errno %ld).\n", PTR_ERR(clnt4)); + clnt4 = NULL; + } + + rpcb_local_clnt = clnt; + rpcb_local_clnt4 = clnt4; + +out: + mutex_unlock(&rpcb_create_local_mutex); + return result; } static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, @@ -209,22 +252,13 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, return rpc_create(&args); } -static int rpcb_register_call(const u32 version, struct rpc_message *msg) +static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg) { - struct sockaddr *addr = (struct sockaddr *)&rpcb_inaddr_loopback; - size_t addrlen = sizeof(rpcb_inaddr_loopback); - struct rpc_clnt *rpcb_clnt; int result, error = 0; msg->rpc_resp = &result; - rpcb_clnt = rpcb_create_local(addr, addrlen, version); - if (!IS_ERR(rpcb_clnt)) { - error = rpc_call_sync(rpcb_clnt, msg, 0); - rpc_shutdown_client(rpcb_clnt); - } else - error = PTR_ERR(rpcb_clnt); - + error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN); if (error < 0) { dprintk("RPC: failed to contact local rpcbind " "server (errno %d).\n", -error); @@ -279,6 +313,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port) struct rpc_message msg = { .rpc_argp = &map, }; + int error; + + error = rpcb_create_local(); + if (error) + return error; dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " "rpcbind\n", (port ? "" : "un"), @@ -288,7 +327,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port) if (port) msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; - return rpcb_register_call(RPCBVERS_2, &msg); + return rpcb_register_call(rpcb_local_clnt, &msg); } /* @@ -313,7 +352,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap, if (port) msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; - result = rpcb_register_call(RPCBVERS_4, msg); + result = rpcb_register_call(rpcb_local_clnt4, msg); kfree(map->r_addr); return result; } @@ -340,7 +379,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap, if (port) msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; - result = rpcb_register_call(RPCBVERS_4, msg); + result = rpcb_register_call(rpcb_local_clnt4, msg); kfree(map->r_addr); return result; } @@ -356,7 +395,7 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg) map->r_addr = ""; msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; - return rpcb_register_call(RPCBVERS_4, msg); + return rpcb_register_call(rpcb_local_clnt4, msg); } /** @@ -414,6 +453,13 @@ int rpcb_v4_register(const u32 program, const u32 version, struct rpc_message msg = { .rpc_argp = &map, }; + int error; + + error = rpcb_create_local(); + if (error) + return error; + if (rpcb_local_clnt4 == NULL) + return -EPROTONOSUPPORT; if (address == NULL) return rpcb_unregister_all_protofamilies(&msg); @@ -491,7 +537,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi .rpc_message = &msg, .callback_ops = &rpcb_getport_ops, .callback_data = map, - .flags = RPC_TASK_ASYNC, + .flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN, }; return rpc_run_task(&task_setup_data); @@ -1027,3 +1073,15 @@ static struct rpc_program rpcb_program = { .version = rpcb_version, .stats = &rpcb_stats, }; + +/** + * cleanup_rpcb_clnt - remove xprtsock's sysctls, unregister + * + */ +void cleanup_rpcb_clnt(void) +{ + if (rpcb_local_clnt4) + 
rpc_shutdown_client(rpcb_local_clnt4); + if (rpcb_local_clnt) + rpc_shutdown_client(rpcb_local_clnt); +} diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index cef74ba0666c..aae6907fd546 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -210,6 +210,7 @@ void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qnam { __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); } +EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue); void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) { @@ -385,6 +386,20 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r } /* + * Tests whether rpc queue is empty + */ +int rpc_queue_empty(struct rpc_wait_queue *queue) +{ + int res; + + spin_lock_bh(&queue->lock); + res = queue->qlen; + spin_unlock_bh(&queue->lock); + return (res == 0); +} +EXPORT_SYMBOL_GPL(rpc_queue_empty); + +/* * Wake up a task on a specific queue */ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 8cce92189019..f438347d817b 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -24,6 +24,8 @@ extern struct cache_detail ip_map_cache, unix_gid_cache; +extern void cleanup_rpcb_clnt(void); + static int __init init_sunrpc(void) { @@ -53,6 +55,7 @@ out: static void __exit cleanup_sunrpc(void) { + cleanup_rpcb_clnt(); rpcauth_remove_module(); cleanup_socket_xprt(); svc_cleanup_xprt_sock(); diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index b845e2293dfe..7d1f9e928f69 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -16,8 +16,6 @@ #define RPCDBG_FACILITY RPCDBG_SVCXPRT -#define SVC_MAX_WAKING 5 - static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); static int svc_deferred_recv(struct svc_rqst *rqstp); static struct cache_deferred_req *svc_defer(struct cache_req *req); @@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) struct svc_pool *pool; struct svc_rqst *rqstp; int cpu; - int thread_avail; if (!(xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) @@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) spin_lock_bh(&pool->sp_lock); + if (!list_empty(&pool->sp_threads) && + !list_empty(&pool->sp_sockets)) + printk(KERN_ERR + "svc_xprt_enqueue: " + "threads and transports both waiting??\n"); + if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { /* Don't enqueue dead transports */ dprintk("svc: transport %p is dead, not enqueued\n", xprt); @@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) } process: - /* Work out whether threads are available */ - thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */ - if (pool->sp_nwaking >= SVC_MAX_WAKING) { - /* too many threads are runnable and trying to wake up */ - thread_avail = 0; - pool->sp_stats.overloads_avoided++; - } - - if (thread_avail) { + if (!list_empty(&pool->sp_threads)) { rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, rq_list); @@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) svc_xprt_get(xprt); rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); - rqstp->rq_waking = 1; - pool->sp_nwaking++; pool->sp_stats.threads_woken++; BUG_ON(xprt->xpt_pool != pool); wake_up(&rqstp->rq_wait); @@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) return -EINTR; spin_lock_bh(&pool->sp_lock); - if (rqstp->rq_waking) { - rqstp->rq_waking = 
0; - pool->sp_nwaking--; - BUG_ON(pool->sp_nwaking < 0); - } xprt = svc_xprt_dequeue(pool); if (xprt) { rqstp->rq_xprt = xprt; @@ -711,7 +699,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) spin_unlock_bh(&pool->sp_lock); len = 0; - if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { + if (test_bit(XPT_LISTENER, &xprt->xpt_flags) && + !test_bit(XPT_CLOSE, &xprt->xpt_flags)) { struct svc_xprt *newxpt; newxpt = xprt->xpt_ops->xpo_accept(xprt); if (newxpt) { @@ -1204,16 +1193,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p) struct svc_pool *pool = p; if (p == SEQ_START_TOKEN) { - seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n"); + seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n"); return 0; } - seq_printf(m, "%u %lu %lu %lu %lu %lu\n", + seq_printf(m, "%u %lu %lu %lu %lu\n", pool->sp_id, pool->sp_stats.packets, pool->sp_stats.sockets_queued, pool->sp_stats.threads_woken, - pool->sp_stats.overloads_avoided, pool->sp_stats.threads_timedout); return 0; diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 4a8f6558718a..d8c041114497 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -655,23 +655,25 @@ static struct unix_gid *unix_gid_lookup(uid_t uid) return NULL; } -static int unix_gid_find(uid_t uid, struct group_info **gip, - struct svc_rqst *rqstp) +static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp) { - struct unix_gid *ug = unix_gid_lookup(uid); + struct unix_gid *ug; + struct group_info *gi; + int ret; + + ug = unix_gid_lookup(uid); if (!ug) - return -EAGAIN; - switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) { + return ERR_PTR(-EAGAIN); + ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle); + switch (ret) { case -ENOENT: - *gip = NULL; - return 0; + return ERR_PTR(-ENOENT); case 0: - *gip = ug->gi; - get_group_info(*gip); + gi = get_group_info(ug->gi); cache_put(&ug->h, &unix_gid_cache); - return 0; + return gi; default: - return -EAGAIN; + return ERR_PTR(-EAGAIN); } } @@ -681,6 +683,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) struct sockaddr_in *sin; struct sockaddr_in6 *sin6, sin6_storage; struct ip_map *ipm; + struct group_info *gi; + struct svc_cred *cred = &rqstp->rq_cred; switch (rqstp->rq_addr.ss_family) { case AF_INET: @@ -721,6 +725,17 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) ip_map_cached_put(rqstp, ipm); break; } + + gi = unix_gid_find(cred->cr_uid, rqstp); + switch (PTR_ERR(gi)) { + case -EAGAIN: + return SVC_DROP; + case -ENOENT: + break; + default: + put_group_info(cred->cr_group_info); + cred->cr_group_info = gi; + } return SVC_OK; } @@ -817,19 +832,11 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) slen = svc_getnl(argv); /* gids length */ if (slen > 16 || (len -= (slen + 2)*4) < 0) goto badcred; - if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp) - == -EAGAIN) + cred->cr_group_info = groups_alloc(slen); + if (cred->cr_group_info == NULL) return SVC_DROP; - if (cred->cr_group_info == NULL) { - cred->cr_group_info = groups_alloc(slen); - if (cred->cr_group_info == NULL) - return SVC_DROP; - for (i = 0; i < slen; i++) - GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv); - } else { - for (i = 0; i < slen ; i++) - svc_getnl(argv); - } + for (i = 0; i < slen; i++) + GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv); if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { *authp = rpc_autherr_badverf; return 
SVC_DENIED; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index fd46d42afa89..469de292c23c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -700,6 +700,10 @@ void xprt_connect(struct rpc_task *task) } if (!xprt_lock_write(xprt, task)) return; + + if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) + xprt->ops->close(xprt); + if (xprt_connected(xprt)) xprt_release_write(xprt, task); else { diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 04732d09013e..3d739e5d15d8 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2019,7 +2019,7 @@ static void xs_connect(struct rpc_task *task) if (xprt_test_and_set_connecting(xprt)) return; - if (transport->sock != NULL) { + if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { dprintk("RPC: xs_connect delayed xprt %p for %lu " "seconds\n", xprt, xprt->reestablish_timeout / HZ); diff --git a/net/wireless/core.c b/net/wireless/core.c index c2a2c563d21a..92b812442488 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -745,9 +745,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, mutex_unlock(&rdev->devlist_mtx); dev_put(dev); } -#ifdef CONFIG_CFG80211_WEXT cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); +#ifdef CONFIG_CFG80211_WEXT wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: @@ -760,10 +760,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, break; } wdev_unlock(wdev); +#endif rdev->opencount++; mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); -#endif break; case NETDEV_UNREGISTER: /* diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 1001db4912f7..82e6002c8d67 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -93,7 +93,18 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) } } - WARN_ON(!bss); + /* + * We might be coming here because the driver reported + * a successful association at the same time as the + * user requested a deauth. In that case, we will have + * removed the BSS from the auth_bsses list due to the + * deauth request when the assoc response makes it. If + * the two code paths acquire the lock the other way + * around, that's just the standard situation of a + * deauth being requested while connected. 
+ */ + if (!bss) + goto out; } else if (wdev->conn) { cfg80211_sme_failed_assoc(wdev); /* diff --git a/net/wireless/reg.c b/net/wireless/reg.c index c01470e7de15..7a0754c92df4 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -141,62 +141,35 @@ static const struct ieee80211_regdomain us_regdom = { .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 27, 0), - /* IEEE 802.11a, channel 36 */ - REG_RULE(5180-10, 5180+10, 40, 6, 23, 0), - /* IEEE 802.11a, channel 40 */ - REG_RULE(5200-10, 5200+10, 40, 6, 23, 0), - /* IEEE 802.11a, channel 44 */ - REG_RULE(5220-10, 5220+10, 40, 6, 23, 0), + /* IEEE 802.11a, channel 36..48 */ + REG_RULE(5180-10, 5240+10, 40, 6, 17, 0), /* IEEE 802.11a, channels 48..64 */ - REG_RULE(5240-10, 5320+10, 40, 6, 23, 0), + REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS), + /* IEEE 802.11a, channels 100..124 */ + REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS), + /* IEEE 802.11a, channels 132..144 */ + REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS), /* IEEE 802.11a, channels 149..165, outdoor */ REG_RULE(5745-10, 5825+10, 40, 6, 30, 0), } }; static const struct ieee80211_regdomain jp_regdom = { - .n_reg_rules = 3, + .n_reg_rules = 6, .alpha2 = "JP", .reg_rules = { - /* IEEE 802.11b/g, channels 1..14 */ - REG_RULE(2412-10, 2484+10, 40, 6, 20, 0), - /* IEEE 802.11a, channels 34..48 */ - REG_RULE(5170-10, 5240+10, 40, 6, 20, - NL80211_RRF_PASSIVE_SCAN), + /* IEEE 802.11b/g, channels 1..11 */ + REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), + /* IEEE 802.11b/g, channels 12..13 */ + REG_RULE(2467-10, 2472+10, 20, 6, 20, 0), + /* IEEE 802.11b/g, channel 14 */ + REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM), + /* IEEE 802.11a, channels 36..48 */ + REG_RULE(5180-10, 5240+10, 40, 6, 20, 0), /* IEEE 802.11a, channels 52..64 */ - REG_RULE(5260-10, 5320+10, 40, 6, 20, - NL80211_RRF_NO_IBSS | - NL80211_RRF_DFS), - } -}; - -static const struct ieee80211_regdomain eu_regdom = { - .n_reg_rules = 6, - /* - * This alpha2 is bogus, we leave it here just for stupid - * backward compatibility - */ - .alpha2 = "EU", - .reg_rules = { - /* IEEE 802.11b/g, channels 1..13 */ - REG_RULE(2412-10, 2472+10, 40, 6, 20, 0), - /* IEEE 802.11a, channel 36 */ - REG_RULE(5180-10, 5180+10, 40, 6, 23, - NL80211_RRF_PASSIVE_SCAN), - /* IEEE 802.11a, channel 40 */ - REG_RULE(5200-10, 5200+10, 40, 6, 23, - NL80211_RRF_PASSIVE_SCAN), - /* IEEE 802.11a, channel 44 */ - REG_RULE(5220-10, 5220+10, 40, 6, 23, - NL80211_RRF_PASSIVE_SCAN), - /* IEEE 802.11a, channels 48..64 */ - REG_RULE(5240-10, 5320+10, 40, 6, 20, - NL80211_RRF_NO_IBSS | - NL80211_RRF_DFS), - /* IEEE 802.11a, channels 100..140 */ - REG_RULE(5500-10, 5700+10, 40, 6, 30, - NL80211_RRF_NO_IBSS | - NL80211_RRF_DFS), + REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS), + /* IEEE 802.11a, channels 100..144 */ + REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS), } }; @@ -206,15 +179,17 @@ static const struct ieee80211_regdomain *static_regdom(char *alpha2) return &us_regdom; if (alpha2[0] == 'J' && alpha2[1] == 'P') return &jp_regdom; + /* Use world roaming rules for "EU", since it was a pseudo + domain anyway... 
*/ if (alpha2[0] == 'E' && alpha2[1] == 'U') - return &eu_regdom; - /* Default, as per the old rules */ - return &us_regdom; + return &world_regdom; + /* Default, world roaming rules */ + return &world_regdom; } static bool is_old_static_regdom(const struct ieee80211_regdomain *rd) { - if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom) + if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom) return true; return false; } @@ -1715,7 +1690,7 @@ int regulatory_hint_user(const char *alpha2) request->wiphy_idx = WIPHY_IDX_STALE; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; - request->initiator = NL80211_REGDOM_SET_BY_USER, + request->initiator = NL80211_REGDOM_SET_BY_USER; queue_regulatory_request(request); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 12dfa62aad18..0c2cbbebca95 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -601,7 +601,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, struct cfg80211_registered_device *rdev; struct wiphy *wiphy; struct iw_scan_req *wreq = NULL; - struct cfg80211_scan_request *creq; + struct cfg80211_scan_request *creq = NULL; int i, err, n_channels = 0; enum ieee80211_band band; @@ -694,8 +694,10 @@ int cfg80211_wext_siwscan(struct net_device *dev, /* translate "Scan for SSID" request */ if (wreq) { if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { - if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) - return -EINVAL; + if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) { + err = -EINVAL; + goto out; + } memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len); creq->ssids[0].ssid_len = wreq->essid_len; } @@ -707,12 +709,15 @@ int cfg80211_wext_siwscan(struct net_device *dev, err = rdev->ops->scan(wiphy, dev, creq); if (err) { rdev->scan_req = NULL; - kfree(creq); + /* creq will be freed below */ } else { nl80211_send_scan_start(rdev, dev); + /* creq now owned by driver */ + creq = NULL; dev_hold(dev); } out: + kfree(creq); cfg80211_unlock_rdev(rdev); return err; } diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 2333d78187e4..dc0fc4989d54 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -655,6 +655,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); + wdev->wext.connect.ssid_len = 0; #endif } diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 584eb4826e02..54face3d4424 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -479,6 +479,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, } err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr); } + wdev->wext.connect.privacy = false; /* * Applications using wireless extensions expect to be * able to delete keys that don't exist, so allow that. 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index cb81ca35b0d6..0ecb16a9a883 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -469,16 +469,16 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total) return 0; } -void xfrm_spd_getinfo(struct xfrmk_spdinfo *si) +void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) { read_lock_bh(&xfrm_policy_lock); - si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN]; - si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT]; - si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD]; - si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; - si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; - si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; - si->spdhcnt = init_net.xfrm.policy_idx_hmask; + si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; + si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; + si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; + si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; + si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; + si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; + si->spdhcnt = net->xfrm.policy_idx_hmask; si->spdhmcnt = xfrm_policy_hashmax; read_unlock_bh(&xfrm_policy_lock); } @@ -1309,15 +1309,28 @@ static inline int xfrm_get_tos(struct flowi *fl, int family) return tos; } -static inline struct xfrm_dst *xfrm_alloc_dst(int family) +static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) { struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + struct dst_ops *dst_ops; struct xfrm_dst *xdst; if (!afinfo) return ERR_PTR(-EINVAL); - xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS); + switch (family) { + case AF_INET: + dst_ops = &net->xfrm.xfrm4_dst_ops; + break; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + case AF_INET6: + dst_ops = &net->xfrm.xfrm6_dst_ops; + break; +#endif + default: + BUG(); + } + xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); xfrm_policy_put_afinfo(afinfo); @@ -1366,6 +1379,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, struct flowi *fl, struct dst_entry *dst) { + struct net *net = xp_net(policy); unsigned long now = jiffies; struct net_device *dev; struct dst_entry *dst_prev = NULL; @@ -1389,7 +1403,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, dst_hold(dst); for (; i < nx; i++) { - struct xfrm_dst *xdst = xfrm_alloc_dst(family); + struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); struct dst_entry *dst1 = &xdst->u.dst; err = PTR_ERR(xdst); @@ -1445,7 +1459,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, if (!dev) goto free_dst; - /* Copy neighbout for reachability confirmation */ + /* Copy neighbour for reachability confirmation */ dst0->neighbour = neigh_clone(dst->neighbour); xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); @@ -2279,6 +2293,7 @@ EXPORT_SYMBOL(xfrm_bundle_ok); int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) { + struct net *net; int err = 0; if (unlikely(afinfo == NULL)) return -EINVAL; @@ -2302,6 +2317,27 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) xfrm_policy_afinfo[afinfo->family] = afinfo; } write_unlock_bh(&xfrm_policy_afinfo_lock); + + rtnl_lock(); + for_each_net(net) { + struct dst_ops *xfrm_dst_ops; + + switch (afinfo->family) { + case AF_INET: + xfrm_dst_ops = 
&net->xfrm.xfrm4_dst_ops; + break; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + case AF_INET6: + xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; + break; +#endif + default: + BUG(); + } + *xfrm_dst_ops = *afinfo->dst_ops; + } + rtnl_unlock(); + return err; } EXPORT_SYMBOL(xfrm_policy_register_afinfo); @@ -2332,6 +2368,22 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); +static void __net_init xfrm_dst_ops_init(struct net *net) +{ + struct xfrm_policy_afinfo *afinfo; + + read_lock_bh(&xfrm_policy_afinfo_lock); + afinfo = xfrm_policy_afinfo[AF_INET]; + if (afinfo) + net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + afinfo = xfrm_policy_afinfo[AF_INET6]; + if (afinfo) + net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; +#endif + read_unlock_bh(&xfrm_policy_afinfo_lock); +} + static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) { struct xfrm_policy_afinfo *afinfo; @@ -2494,6 +2546,7 @@ static int __net_init xfrm_net_init(struct net *net) rv = xfrm_policy_init(net); if (rv < 0) goto out_policy; + xfrm_dst_ops_init(net); rv = xfrm_sysctl_init(net); if (rv < 0) goto out_sysctl; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index d847f1a52b44..b36cc344474b 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -641,11 +641,11 @@ out: } EXPORT_SYMBOL(xfrm_state_flush); -void xfrm_sad_getinfo(struct xfrmk_sadinfo *si) +void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) { spin_lock_bh(&xfrm_state_lock); - si->sadcnt = init_net.xfrm.state_num; - si->sadhcnt = init_net.xfrm.state_hmask; + si->sadcnt = net->xfrm.state_num; + si->sadhcnt = net->xfrm.state_hmask; si->sadhmcnt = xfrm_state_hashmax; spin_unlock_bh(&xfrm_state_lock); } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 1ada6186933c..d5a712976004 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -781,7 +781,8 @@ static inline size_t xfrm_spdinfo_msgsize(void) + nla_total_size(sizeof(struct xfrmu_spdhinfo)); } -static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) +static int build_spdinfo(struct sk_buff *skb, struct net *net, + u32 pid, u32 seq, u32 flags) { struct xfrmk_spdinfo si; struct xfrmu_spdinfo spc; @@ -795,7 +796,7 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) f = nlmsg_data(nlh); *f = flags; - xfrm_spd_getinfo(&si); + xfrm_spd_getinfo(net, &si); spc.incnt = si.incnt; spc.outcnt = si.outcnt; spc.fwdcnt = si.fwdcnt; @@ -828,7 +829,7 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, if (r_skb == NULL) return -ENOMEM; - if (build_spdinfo(r_skb, spid, seq, *flags) < 0) + if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0) BUG(); return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); @@ -841,7 +842,8 @@ static inline size_t xfrm_sadinfo_msgsize(void) + nla_total_size(4); /* XFRMA_SAD_CNT */ } -static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) +static int build_sadinfo(struct sk_buff *skb, struct net *net, + u32 pid, u32 seq, u32 flags) { struct xfrmk_sadinfo si; struct xfrmu_sadhinfo sh; @@ -854,7 +856,7 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) f = nlmsg_data(nlh); *f = flags; - xfrm_sad_getinfo(&si); + xfrm_sad_getinfo(net, &si); sh.sadhmcnt = si.sadhmcnt; sh.sadhcnt = si.sadhcnt; @@ -882,7 +884,7 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, if (r_skb == NULL) 
return -ENOMEM; - if (build_sadinfo(r_skb, spid, seq, *flags) < 0) + if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0) BUG(); return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);