author	Stephen Hemminger <shemminger@osdl.org>	2006-11-14 10:43:58 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-02 21:22:32 -0800
commit	a1bcfacd0577ff477e934731d4ceb3d26eab947d (patch)
tree	f5a7b2e9933a72135a49a1348b7839e8afdb020e /net
parent	d23ca15a21804631d8f787a0cc5646df81b9c2ea (diff)
netpoll: private skb pool (rev3)
It was a dark and stormy night when Steve first saw the netpoll beast. The beast was odd, and misshapen but not extremely ugly. "Let me take off one of your warts" he said.

This wart is where you tried to make an skb list yourself. If the beast had ever run out of memory, he would have stupefied himself unnecessarily.

The first try was painful, so he tried again till the bleeding stopped. And again, and again...

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
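In code terms, the wart being removed is netpoll's hand-rolled skb free list (the skbs pointer, nr_skbs counter and skb_list_lock), which the patch replaces with a single struct sk_buff_head whose built-in qlen and lock do the same job. Below is a minimal sketch of that pool pattern, assuming ordinary kernel context: the pool_init/pool_refill/pool_get helper names and the MAX_SKBS/MAX_SKB_SIZE values are illustrative placeholders rather than code from the patch, while skb_queue_head_init(), skb_queue_len(), alloc_skb(), skb_queue_tail() and skb_dequeue() are the stock skbuff queue helpers.

/* Sketch only: skb_queue_tail() takes the queue's spinlock per call;
 * the patch itself holds skb_pool.lock across the whole refill loop
 * and uses the unlocked __skb_queue_tail() variant instead. */
#include <linux/skbuff.h>

#define MAX_SKBS	32	/* illustrative pool cap */
#define MAX_SKB_SIZE	1518	/* illustrative buffer size */

static struct sk_buff_head skb_pool;

static void pool_init(void)
{
	/* sets up the list head plus its qlen counter and spinlock */
	skb_queue_head_init(&skb_pool);
}

static void pool_refill(void)
{
	struct sk_buff *skb;

	/* top the pool up to MAX_SKBS preallocated buffers */
	while (skb_queue_len(&skb_pool) < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;
		skb_queue_tail(&skb_pool, skb);
	}
}

static struct sk_buff *pool_get(void)
{
	/* skb_dequeue() returns NULL when the pool is empty */
	return skb_dequeue(&skb_pool);
}

In the patch itself, find_skb() falls back to skb_dequeue(&skb_pool) only when alloc_skb() fails, and retries via netpoll_poll() while a retry counter stays below ten before giving up and returning NULL, instead of spinning up to a million iterations around a one-time printk warning.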
Diffstat (limited to 'net')
-rw-r--r--	net/core/netpoll.c	53
1 file changed, 21 insertions(+), 32 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6589adb14cb..4de62f1f413 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -36,9 +36,7 @@
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
 #define MAX_RETRIES 20000
-static DEFINE_SPINLOCK(skb_list_lock);
-static int nr_skbs;
-static struct sk_buff *skbs;
+static struct sk_buff_head skb_pool;
 static DEFINE_SPINLOCK(queue_lock);
 static int queue_depth;
@@ -190,17 +188,15 @@ static void refill_skbs(void)
 	struct sk_buff *skb;
 	unsigned long flags;
-	spin_lock_irqsave(&skb_list_lock, flags);
-	while (nr_skbs < MAX_SKBS) {
+	spin_lock_irqsave(&skb_pool.lock, flags);
+	while (skb_pool.qlen < MAX_SKBS) {
 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
 		if (!skb)
 			break;
-		skb->next = skbs;
-		skbs = skb;
-		nr_skbs++;
+		__skb_queue_tail(&skb_pool, skb);
 	}
-	spin_unlock_irqrestore(&skb_list_lock, flags);
+	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 static void zap_completion_queue(void)
@@ -229,38 +225,25 @@ static void zap_completion_queue(void)
 	put_cpu_var(softnet_data);
 }
-static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
-	int once = 1, count = 0;
-	unsigned long flags;
-	struct sk_buff *skb = NULL;
+	int count = 0;
+	struct sk_buff *skb;
 	zap_completion_queue();
+	refill_skbs();
 repeat:
-	if (nr_skbs < MAX_SKBS)
-		refill_skbs();
 	skb = alloc_skb(len, GFP_ATOMIC);
-
-	if (!skb) {
-		spin_lock_irqsave(&skb_list_lock, flags);
-		skb = skbs;
-		if (skb) {
-			skbs = skb->next;
-			skb->next = NULL;
-			nr_skbs--;
-		}
-		spin_unlock_irqrestore(&skb_list_lock, flags);
-	}
+	if (!skb)
+		skb = skb_dequeue(&skb_pool);
 	if(!skb) {
-		count++;
-		if (once && (count == 1000000)) {
-			printk("out of netpoll skbs!\n");
-			once = 0;
+		if (++count < 10) {
+			netpoll_poll(np);
+			goto repeat;
 		}
-		netpoll_poll(np);
-		goto repeat;
+		return NULL;
 	}
 	atomic_set(&skb->users, 1);
@@ -770,6 +753,12 @@ int netpoll_setup(struct netpoll *np)
 	return -1;
 }
+static int __init netpoll_init(void) {
+	skb_queue_head_init(&skb_pool);
+	return 0;
+}
+core_initcall(netpoll_init);
+
 void netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;