author | David S. Miller <davem@davemloft.net> | 2013-01-29 15:32:13 -0500
committer | David S. Miller <davem@davemloft.net> | 2013-01-29 15:32:13 -0500
commit | f1e7b73acc26e8908af783bcd3a9900fd80688f5 (patch)
tree | 9a9382fb7f12f1889020efb4bffa3f4a88589fc5 /drivers/net/tun.c
parent | 218774dc341f219bfcf940304a081b121a0e8099 (diff)
parent | fc16e884a2320198b8cb7bc2fdcf6b4485e79709 (diff)
download | linux-3.10-f1e7b73acc26e8908af783bcd3a9900fd80688f5.tar.gz, linux-3.10-f1e7b73acc26e8908af783bcd3a9900fd80688f5.tar.bz2, linux-3.10-f1e7b73acc26e8908af783bcd3a9900fd80688f5.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Bring in the 'net' tree so that we can get some ipv4/ipv6 bug
fixes that some net-next work will build upon.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tun.c')
-rw-r--r-- | drivers/net/tun.c | 22
1 file changed, 14 insertions, 8 deletions
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 293ce8dfc9e..8d208dd9296 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,11 +109,11 @@ struct tap_filter {
 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* 1024 is probably a high enough limit: modern hypervisors seem to support on
- * the order of 100-200 CPUs so this leaves us some breathing space if we want
- * to match a queue per guest CPU.
- */
-#define MAX_TAP_QUEUES 1024
+/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
+ * the netdevice to be fit in one page. So we can make sure the success of
+ * memory allocation. TODO: increase the limit. */
+#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+#define MAX_TAP_FLOWS 4096
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
@@ -186,6 +186,7 @@ struct tun_struct {
 	unsigned int numdisabled;
 	struct list_head disabled;
 	void *security;
+	u32 flow_count;
 };
 
 static inline u32 tun_hashfn(u32 rxhash)
@@ -219,6 +220,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 		e->queue_index = queue_index;
 		e->tun = tun;
 		hlist_add_head_rcu(&e->hash_link, head);
+		++tun->flow_count;
 	}
 	return e;
 }
@@ -229,6 +231,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 		  e->rxhash, e->queue_index);
 	hlist_del_rcu(&e->hash_link);
 	kfree_rcu(e, rcu);
+	--tun->flow_count;
 }
 
 static void tun_flow_flush(struct tun_struct *tun)
@@ -318,7 +321,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 		e->updated = jiffies;
 	} else {
 		spin_lock_bh(&tun->lock);
-		if (!tun_flow_find(head, rxhash))
+		if (!tun_flow_find(head, rxhash) &&
+		    tun->flow_count < MAX_TAP_FLOWS)
 			tun_flow_create(tun, head, rxhash, queue_index);
 
 		if (!timer_pending(&tun->flow_gc_timer))
@@ -1587,6 +1591,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 	else {
 		char *name;
 		unsigned long flags = 0;
+		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+			     MAX_TAP_QUEUES : 1;
 
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
@@ -1610,8 +1616,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		name = ifr->ifr_name;
 
 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
-				       tun_setup,
-				       MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+				       tun_setup, queues, queues);
+
 		if (!dev)
 			return -ENOMEM;
 
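For readers following the flow-cache hunks above, here is a minimal, self-contained userspace sketch (not kernel code) of the pattern they apply: keep a per-table counter that is incremented on insert and decremented on delete, and refuse to create a new entry once a fixed cap is reached. All names here (flow_table, flow_update, MAX_FLOWS, ...) are illustrative stand-ins rather than the tun driver's API; the real driver additionally hashes entries into buckets, protects them with a spinlock and RCU, and ages them out with a garbage-collection timer, all of which this sketch omits.

/* Simplified model of a capped flow cache (hypothetical names). */
#include <stdio.h>
#include <stdlib.h>

#define MAX_FLOWS 4096          /* plays the role of MAX_TAP_FLOWS above */

struct flow_entry {
	unsigned int rxhash;
	unsigned int queue_index;
	struct flow_entry *next;
};

struct flow_table {
	struct flow_entry *head;
	unsigned int flow_count; /* plays the role of tun->flow_count */
};

static struct flow_entry *flow_find(struct flow_table *t, unsigned int rxhash)
{
	for (struct flow_entry *e = t->head; e; e = e->next)
		if (e->rxhash == rxhash)
			return e;
	return NULL;
}

static struct flow_entry *flow_create(struct flow_table *t,
				      unsigned int rxhash,
				      unsigned int queue_index)
{
	struct flow_entry *e = malloc(sizeof(*e));

	if (e) {
		e->rxhash = rxhash;
		e->queue_index = queue_index;
		e->next = t->head;
		t->head = e;
		++t->flow_count;          /* count every insert ...          */
	}
	return e;
}

static void flow_update(struct flow_table *t, unsigned int rxhash,
			unsigned int queue_index)
{
	struct flow_entry *e = flow_find(t, rxhash);

	if (e)
		e->queue_index = queue_index;        /* refresh existing entry */
	else if (t->flow_count < MAX_FLOWS)          /* ... and cap new inserts */
		flow_create(t, rxhash, queue_index);
	/* else: table is full, new flows are simply not cached */
}

int main(void)
{
	struct flow_table t = { 0 };

	/* Feed in far more distinct hashes than the cap allows. */
	for (unsigned int h = 0; h < 10000; h++)
		flow_update(&t, h, h % 4);

	printf("flows cached: %u (cap %d)\n", t.flow_count, MAX_FLOWS);

	/* Free the sketch's entries before exit. */
	while (t.head) {
		struct flow_entry *e = t.head;
		t.head = e->next;
		free(e);
	}
	return 0;
}

The cap matters because tun_flow_update() runs on the packet path, so without MAX_TAP_FLOWS a stream of short-lived flows with distinct rx hashes could grow the cache without bound; bounding it keeps memory use predictable at the cost of not caching new flows while the table is full. The remaining hunks make a separate but related economy: alloc_netdev_mqs() now requests MAX_TAP_QUEUES queue structures only when IFF_MULTI_QUEUE is set, and a single queue otherwise.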