author     Dan Williams <dan.j.williams@intel.com>   2013-12-30 12:37:29 -0800
committer  Dan Williams <dan.j.williams@intel.com>   2014-09-28 07:05:16 -0700
commit     7bced397510ab569d31de4c70b39e13355046387 (patch)
tree       faa4067a53e42acffc752e9a153e7dbaed4126e5 /drivers
parent     08223d80df38e666a42d7c82eb340db55c6e03bd (diff)
download   linux-exynos-7bced397510ab569d31de4c70b39e13355046387.tar.gz
           linux-exynos-7bced397510ab569d31de4c70b39e13355046387.tar.bz2
           linux-exynos-7bced397510ab569d31de4c70b39e13355046387.zip
net_dma: simple removal
Per commit 77873803363c ("net_dma: mark broken"), net_dma is no longer
used and there is no plan to fix it.

This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma-induced changes is deferred to
subsequent patches.

Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():

    https://lkml.org/lkml/2014/9/3/177

Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: David Whipple <whipple@securedatainnovations.ch>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: <stable@vger.kernel.org>
Reported-by: Roman Gushchin <klamm@yandex-team.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
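For context, the exports deleted below formed a small pin/copy/unpin pipeline
consumed by the TCP receive copy-offload path that net_dma provided. The
following is only a rough sketch of that consumer pattern, reconstructed from
the signatures removed in this patch rather than from the actual net/core
code; chan, msg_iov, skb_data and copy_len are placeholder names:

	/* Hedged sketch: how a caller used the APIs removed below. */
	struct dma_pinned_list *pinned;
	dma_cookie_t cookie;

	/* Pin the destination iovec's user pages for DMA. */
	pinned = dma_pin_iovec_pages(msg_iov, copy_len);
	if (pinned) {
		/* Queue an asynchronous engine copy into the iovec. */
		cookie = dma_memcpy_to_iovec(chan, msg_iov, pinned,
					     skb_data, copy_len);
		dma_async_issue_pending(chan);
		/* ... poll dma_async_is_tx_complete(chan, cookie, NULL, NULL) ... */
		/* Dirty and release the pinned pages when done. */
		dma_unpin_iovec_pages(pinned);
	}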
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/Kconfig        |  12 -
-rw-r--r--  drivers/dma/Makefile       |   1 -
-rw-r--r--  drivers/dma/dmaengine.c    | 104 -
-rw-r--r--  drivers/dma/ioat/dma.c     |   1 -
-rw-r--r--  drivers/dma/ioat/dma.h     |   7 -
-rw-r--r--  drivers/dma/ioat/dma_v2.c  |   1 -
-rw-r--r--  drivers/dma/ioat/dma_v3.c  |   1 -
-rw-r--r--  drivers/dma/iovlock.c      | 280 -
8 files changed, 0 insertions(+), 407 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 605b016bcea4..6b5f37e01a70 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -368,18 +368,6 @@ config DMA_OF
comment "DMA Clients"
depends on DMA_ENGINE
-config NET_DMA
- bool "Network: TCP receive copy offload"
- depends on DMA_ENGINE && NET
- default (INTEL_IOATDMA || FSL_DMA)
- depends on BROKEN
- help
- This enables the use of DMA engines in the network stack to
- offload receive copy-to-user operations, freeing CPU cycles.
-
- Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
- say N.
-
config ASYNC_TX_DMA
bool "Async_tx: Offload support for the async_tx api"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a029d0f4a1be..0c9dc7549327 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
obj-$(CONFIG_DMA_OF) += of-dma.o
-obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ed610b497518..268de183b519 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1084,110 +1084,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
- unsigned int dest_off, struct page *src_pg, unsigned int src_off,
- size_t len)
-{
- struct dma_device *dev = chan->device;
- struct dma_async_tx_descriptor *tx;
- struct dmaengine_unmap_data *unmap;
- dma_cookie_t cookie;
- unsigned long flags;
-
- unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
- if (!unmap)
- return -ENOMEM;
-
- unmap->to_cnt = 1;
- unmap->from_cnt = 1;
- unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
- DMA_TO_DEVICE);
- unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
- DMA_FROM_DEVICE);
- unmap->len = len;
- flags = DMA_CTRL_ACK;
- tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
- len, flags);
-
- if (!tx) {
- dmaengine_unmap_put(unmap);
- return -ENOMEM;
- }
-
- dma_set_unmap(tx, unmap);
- cookie = tx->tx_submit(tx);
- dmaengine_unmap_put(unmap);
-
- preempt_disable();
- __this_cpu_add(chan->local->bytes_transferred, len);
- __this_cpu_inc(chan->local->memcpy_count);
- preempt_enable();
-
- return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
- void *src, size_t len)
-{
- return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
- (unsigned long) dest & ~PAGE_MASK,
- virt_to_page(src),
- (unsigned long) src & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
- unsigned int offset, void *kdata, size_t len)
-{
- return dma_async_memcpy_pg_to_pg(chan, page, offset,
- virt_to_page(kdata),
- (unsigned long) kdata & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index b76c1485933b..940c1502a8b5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
- ioat_set_tcp_copy_break(4096);
err = ioat_register(device);
if (err)
return err;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index e982f00a9843..d63f68b1aa35 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
#define dump_desc_dbg(c, d) \
({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
-static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
-{
- #ifdef CONFIG_NET_DMA
- sysctl_tcp_dma_copybreak = copybreak;
- #endif
-}
-
static inline struct ioat_chan_common *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 2ce9be498608..695483e6be32 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -900,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
- ioat_set_tcp_copy_break(2048);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 85971d6e9646..895f869d6c2c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
- ioat_set_tcp_copy_break(262144);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
deleted file mode 100644
index bb48a57c2fc1..000000000000
--- a/drivers/dma/iovlock.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- * Portions based on net/core/datagram.c and copyrighted by their authors.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-
-/*
- * This code allows the net stack to make use of a DMA engine for
- * skb to iovec copies.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <net/tcp.h> /* for memcpy_toiovec */
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-static int num_pages_spanned(struct iovec *iov)
-{
- return
- ((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
- ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
-}
-
-/*
- * Pin down all the iovec pages needed for len bytes.
- * Return a struct dma_pinned_list to keep track of pages pinned down.
- *
- * We are allocating a single chunk of memory, and then carving it up into
- * 3 sections, the latter 2 whose size depends on the number of iovecs and the
- * total number of pages, respectively.
- */
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
-{
- struct dma_pinned_list *local_list;
- struct page **pages;
- int i;
- int ret;
- int nr_iovecs = 0;
- int iovec_len_used = 0;
- int iovec_pages_used = 0;
-
- /* don't pin down non-user-based iovecs */
- if (segment_eq(get_fs(), KERNEL_DS))
- return NULL;
-
- /* determine how many iovecs/pages there are, up front */
- do {
- iovec_len_used += iov[nr_iovecs].iov_len;
- iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
- nr_iovecs++;
- } while (iovec_len_used < len);
-
- /* single kmalloc for pinned list, page_list[], and the page arrays */
- local_list = kmalloc(sizeof(*local_list)
- + (nr_iovecs * sizeof (struct dma_page_list))
- + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
- if (!local_list)
- goto out;
-
- /* list of pages starts right after the page list array */
- pages = (struct page **) &local_list->page_list[nr_iovecs];
-
- local_list->nr_iovecs = 0;
-
- for (i = 0; i < nr_iovecs; i++) {
- struct dma_page_list *page_list = &local_list->page_list[i];
-
- len -= iov[i].iov_len;
-
- if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
- goto unpin;
-
- page_list->nr_pages = num_pages_spanned(&iov[i]);
- page_list->base_address = iov[i].iov_base;
-
- page_list->pages = pages;
- pages += page_list->nr_pages;
-
- /* pin pages down */
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(
- current,
- current->mm,
- (unsigned long) iov[i].iov_base,
- page_list->nr_pages,
- 1, /* write */
- 0, /* force */
- page_list->pages,
- NULL);
- up_read(&current->mm->mmap_sem);
-
- if (ret != page_list->nr_pages)
- goto unpin;
-
- local_list->nr_iovecs = i + 1;
- }
-
- return local_list;
-
-unpin:
- dma_unpin_iovec_pages(local_list);
-out:
- return NULL;
-}
-
-void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
-{
- int i, j;
-
- if (!pinned_list)
- return;
-
- for (i = 0; i < pinned_list->nr_iovecs; i++) {
- struct dma_page_list *page_list = &pinned_list->page_list[i];
- for (j = 0; j < page_list->nr_pages; j++) {
- set_page_dirty_lock(page_list->pages[j]);
- page_cache_release(page_list->pages[j]);
- }
- }
-
- kfree(pinned_list);
-}
-
-
-/*
- * We have already pinned down the pages we will be using in the iovecs.
- * Each entry in iov array has corresponding entry in pinned_list->page_list.
- * Using array indexing to keep iov[] and page_list[] in sync.
- * Initial elements in iov array's iov->iov_len will be 0 if already copied into
- * by another call.
- * iov array length remaining guaranteed to be bigger than len.
- */
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
-{
- int iov_byte_offset;
- int copy;
- dma_cookie_t dma_cookie = 0;
- int iovec_idx;
- int page_idx;
-
- if (!chan)
- return memcpy_toiovec(iov, kdata, len);
-
- iovec_idx = 0;
- while (iovec_idx < pinned_list->nr_iovecs) {
- struct dma_page_list *page_list;
-
- /* skip already used-up iovecs */
- while (!iov[iovec_idx].iov_len)
- iovec_idx++;
-
- page_list = &pinned_list->page_list[iovec_idx];
-
- iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
- page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
- /* break up copies to not cross page boundary */
- while (iov[iovec_idx].iov_len) {
- copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
- copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
- dma_cookie = dma_async_memcpy_buf_to_pg(chan,
- page_list->pages[page_idx],
- iov_byte_offset,
- kdata,
- copy);
- /* poll for a descriptor slot */
- if (unlikely(dma_cookie < 0)) {
- dma_async_issue_pending(chan);
- continue;
- }
-
- len -= copy;
- iov[iovec_idx].iov_len -= copy;
- iov[iovec_idx].iov_base += copy;
-
- if (!len)
- return dma_cookie;
-
- kdata += copy;
- iov_byte_offset = 0;
- page_idx++;
- }
- iovec_idx++;
- }
-
- /* really bad if we ever run out of iovecs */
- BUG();
- return -EFAULT;
-}
-
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, struct page *page,
- unsigned int offset, size_t len)
-{
- int iov_byte_offset;
- int copy;
- dma_cookie_t dma_cookie = 0;
- int iovec_idx;
- int page_idx;
- int err;
-
- /* this needs as-yet-unimplemented buf-to-buff, so punt. */
- /* TODO: use dma for this */
- if (!chan || !pinned_list) {
- u8 *vaddr = kmap(page);
- err = memcpy_toiovec(iov, vaddr + offset, len);
- kunmap(page);
- return err;
- }
-
- iovec_idx = 0;
- while (iovec_idx < pinned_list->nr_iovecs) {
- struct dma_page_list *page_list;
-
- /* skip already used-up iovecs */
- while (!iov[iovec_idx].iov_len)
- iovec_idx++;
-
- page_list = &pinned_list->page_list[iovec_idx];
-
- iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
- page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
- /* break up copies to not cross page boundary */
- while (iov[iovec_idx].iov_len) {
- copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
- copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
- dma_cookie = dma_async_memcpy_pg_to_pg(chan,
- page_list->pages[page_idx],
- iov_byte_offset,
- page,
- offset,
- copy);
- /* poll for a descriptor slot */
- if (unlikely(dma_cookie < 0)) {
- dma_async_issue_pending(chan);
- continue;
- }
-
- len -= copy;
- iov[iovec_idx].iov_len -= copy;
- iov[iovec_idx].iov_base += copy;
-
- if (!len)
- return dma_cookie;
-
- offset += copy;
- iov_byte_offset = 0;
- page_idx++;
- }
- iovec_idx++;
- }
-
- /* really bad if we ever run out of iovecs */
- BUG();
- return -EFAULT;
-}