author | Rusty Russell <rusty@rustcorp.com.au> | 2008-07-25 12:06:01 -0500 |
---|---|---|
committer | Rusty Russell <rusty@rustcorp.com.au> | 2008-07-25 12:06:02 +1000 |
commit | fb6813f480806d62361719e84777c8e00d3e86a8 (patch) | |
tree | 800f68b515a39b554ed3a852ba36ea54c1beef8c /drivers/net | |
parent | 97402b96f87c6e32f75f1bffdd91a5ee144b679d (diff) | |
virtio: Recycle unused recv buffer pages for large skbs in net driver
If we hack the virtio_net driver to always allocate full-sized (64k+)
skbuffs, the driver slows down (lguest numbers):
Time to receive 1GB (small buffers): 10.85 seconds
Time to receive 1GB (64k+ buffers): 24.75 seconds
Of course, large buffers use up more space in the ring, so we increase
that from 128 to 2048:
Time to receive 1GB (64k+ buffers, 2k ring): 16.61 seconds
If we recycle pages rather than using alloc_page/free_page:
Time to receive 1GB (64k+ buffers, 2k ring, recycle pages): 10.81 seconds
This demonstrates that with efficient allocation, we don't need to
have a separate "small buffer" queue.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
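The core trick here is an intrusive free list: a page being returned stores the pointer to the next free page in its otherwise-idle `private` field, so give and get are O(1) pointer swaps and the real allocator is only consulted when the list runs dry. Below is a minimal userspace sketch of the same pattern; the `give_a_page`/`get_a_page` names mirror the patch, but the `struct page` layout, the pool wrapper, and the `malloc` fallback are illustrative stand-ins, not the kernel implementation.

```c
/* Userspace sketch of the patch's recycling scheme: an intrusive,
 * singly-linked free list threaded through each free page's "private"
 * field.  malloc() stands in for alloc_page(); illustrative only. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct page {
	unsigned long private;		/* next free page while on the list */
	unsigned char data[PAGE_SIZE];
};

struct pool {
	struct page *pages;		/* head of the free list */
};

/* Return a page to the pool: O(1), no allocator involved. */
static void give_a_page(struct pool *pool, struct page *page)
{
	page->private = (unsigned long)pool->pages;
	pool->pages = page;
}

/* Take a page from the pool, falling back to the allocator when empty. */
static struct page *get_a_page(struct pool *pool)
{
	struct page *p = pool->pages;

	if (p)
		pool->pages = (struct page *)p->private;
	else
		p = malloc(sizeof(*p));
	return p;
}

int main(void)
{
	struct pool pool = { .pages = NULL };

	struct page *a = get_a_page(&pool);	/* list empty: falls back to malloc */
	give_a_page(&pool, a);			/* recycle it */
	struct page *b = get_a_page(&pool);	/* O(1) pop: the same page comes back */
	printf("recycled: %s\n", a == b ? "yes" : "no");
	give_a_page(&pool, b);			/* park it on the list again */

	/* Teardown, as virtnet_remove does below: drain the list, freeing each page. */
	while (pool.pages)
		free(get_a_page(&pool));
	return 0;
}
```

Note the design consequence visible in the patch: recycled pages are not handed back to the system while the driver is loaded; teardown in virtnet_remove drains the list. That trades some held memory for allocation-free refills of the receive ring.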
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/virtio_net.c | 36 |
1 file changed, 35 insertions, 1 deletion
```diff
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0886b8a2d92..0196a0df902 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -61,6 +61,9 @@ struct virtnet_info
 	/* Receive & send queues. */
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
+
+	/* Chain pages by the private ptr. */
+	struct page *pages;
 };
 
 static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
@@ -73,6 +76,23 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
 	sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
 }
 
+static void give_a_page(struct virtnet_info *vi, struct page *page)
+{
+	page->private = (unsigned long)vi->pages;
+	vi->pages = page;
+}
+
+static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
+{
+	struct page *p = vi->pages;
+
+	if (p)
+		vi->pages = (struct page *)p->private;
+	else
+		p = alloc_page(gfp_mask);
+	return p;
+}
+
 static void skb_xmit_done(struct virtqueue *svq)
 {
 	struct virtnet_info *vi = svq->vdev->priv;
@@ -101,6 +121,15 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 	}
 	len -= sizeof(struct virtio_net_hdr);
 
+	if (len <= MAX_PACKET_LEN) {
+		unsigned int i;
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+			give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page);
+		skb->data_len = 0;
+		skb_shinfo(skb)->nr_frags = 0;
+	}
+
 	err = pskb_trim(skb, len);
 	if (err) {
 		pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
@@ -183,7 +212,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 	if (vi->big_packets) {
 		for (i = 0; i < MAX_SKB_FRAGS; i++) {
 			skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-			f->page = alloc_page(GFP_ATOMIC);
+			f->page = get_a_page(vi, GFP_ATOMIC);
 			if (!f->page)
 				break;
 
@@ -506,6 +535,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->dev = dev;
 	vi->vdev = vdev;
 	vdev->priv = vi;
+	vi->pages = NULL;
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -591,6 +621,10 @@ static void virtnet_remove(struct virtio_device *vdev)
 	vdev->config->del_vq(vi->svq);
 	vdev->config->del_vq(vi->rvq);
 	unregister_netdev(vi->dev);
+
+	while (vi->pages)
+		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
+
 	free_netdev(vi->dev);
 }
```
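To get a feel for why recycling recovers most of the slowdown measured above, the hypothetical userspace microbenchmark below contrasts a fresh allocation per buffer with reuse through a one-entry free list. `malloc`/`free` stand in for `alloc_page`/`__free_pages`; the absolute numbers will not match the lguest figures, and only the relative effect is meaningful.

```c
/* Hypothetical microbenchmark: fresh alloc/free per buffer vs. recycling
 * through a free list.  Illustrates allocator pressure only; it does not
 * reproduce the lguest throughput numbers in the commit message. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096
#define ITERS (1 << 20)

static double now_sec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

int main(void)
{
	void *freelist = NULL;
	double t;
	int i;

	/* Fresh page every iteration, like the old alloc_page/free_page path. */
	t = now_sec();
	for (i = 0; i < ITERS; i++) {
		char *p = malloc(PAGE_SIZE);
		((volatile char *)p)[0] = 1;	/* touch the buffer */
		free(p);
	}
	printf("alloc/free each time: %.3fs\n", now_sec() - t);

	/* Recycle via a free list, like get_a_page/give_a_page. */
	t = now_sec();
	for (i = 0; i < ITERS; i++) {
		char *p = freelist ? freelist : malloc(PAGE_SIZE);
		freelist = NULL;		/* page is in use */
		((volatile char *)p)[0] = 1;
		freelist = p;			/* give the page back */
	}
	free(freelist);
	printf("recycled:             %.3fs\n", now_sec() - t);
	return 0;
}
```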