author     Linus Torvalds <torvalds@linux-foundation.org>  2013-02-26 09:24:48 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-26 09:24:48 -0800
commit     5115f3c19d17851aaff5a857f55b4a019c908775
tree       0d02cf01e12e86365f4f5e3b234f986daef181a7 /crypto
parent     c41b3810c09e60664433548c5218cc6ece6a8903
parent     17166a3b6e88b93189e6be5f7e1335a3cc4fa965
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
 "This is fairly big pull by my standards as I had missed last merge
  window.  So we have the support for device tree for slave-dmaengine,
  large updates to dw_dmac driver from Andy for reusing on different
  architectures.  Along with this we have fixes on bunch of the drivers"

Fix up trivial conflicts, usually due to #include line movement next to
each other.

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
  Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
  ARM: dts: pl330: Add #dma-cells for generic dma binding support
  DMA: PL330: Register the DMA controller with the generic DMA helpers
  DMA: PL330: Add xlate function
  DMA: PL330: Add new pl330 filter for DT case.
  dma: tegra20-apb-dma: remove unnecessary assignment
  edma: do not waste memory for dma_mask
  dma: coh901318: set residue only if dma is in progress
  dma: coh901318: avoid unbalanced locking
  dmaengine.h: remove redundant else keyword
  dma: of-dma: protect list write operation by spin_lock
  dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
  dma: of-dma.c: fix memory leakage
  dw_dmac: apply default dma_mask if needed
  dmaengine: ioat - fix spare sparse complain
  dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
  ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
  dw_dmac: add support for Lynxpoint DMA controllers
  dw_dmac: return proper residue value
  dw_dmac: fill individual length of descriptor
  ...
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/async_tx/async_memcpy.c  6
-rw-r--r--  crypto/async_tx/async_memset.c  1
-rw-r--r--  crypto/async_tx/async_tx.c      9
-rw-r--r--  crypto/async_tx/async_xor.c     4
4 files changed, 13 insertions, 7 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 361b5e8239b..9e62feffb37 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -67,6 +67,12 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
         tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
                                             len, dma_prep_flags);
+        if (!tx) {
+            dma_unmap_page(device->dev, dma_dest, len,
+                       DMA_FROM_DEVICE);
+            dma_unmap_page(device->dev, dma_src, len,
+                       DMA_TO_DEVICE);
+        }
     }
 
     if (tx) {
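
The async_memcpy.c hunk above plugs a resource leak: when device_prep_dma_memcpy() fails to return a descriptor, the two pages were previously left DMA-mapped while the code fell back to a synchronous copy. Below is a minimal sketch of the resulting map/prep/unmap-on-failure pattern, assuming a generic dmaengine channel; the helper name prep_copy_or_unmap() and its locals are illustrative, not the actual code in the file.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Sketch only: map both pages, ask the driver for a memcpy descriptor,
 * and undo the mappings if the driver cannot provide one, so the caller
 * can fall back to a plain CPU copy without leaking DMA mappings. */
static struct dma_async_tx_descriptor *
prep_copy_or_unmap(struct dma_chan *chan, struct page *dest, struct page *src,
                   unsigned int len, unsigned long dma_prep_flags)
{
    struct dma_device *device = chan->device;
    struct dma_async_tx_descriptor *tx;
    dma_addr_t dma_dest, dma_src;

    dma_dest = dma_map_page(device->dev, dest, 0, len, DMA_FROM_DEVICE);
    dma_src = dma_map_page(device->dev, src, 0, len, DMA_TO_DEVICE);

    tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
                                        len, dma_prep_flags);
    if (!tx) {
        /* descriptor allocation failed: release both mappings */
        dma_unmap_page(device->dev, dma_dest, len, DMA_FROM_DEVICE);
        dma_unmap_page(device->dev, dma_src, len, DMA_TO_DEVICE);
    }
    return tx;  /* NULL tells the caller to use the synchronous path */
}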
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 58e4a8752ae..05a4d1e0014 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -25,6 +25,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/async_tx.h>
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 84212097937..7be34248b45 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,8 +128,8 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
         }
         device->device_issue_pending(chan);
     } else {
-        if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-            panic("%s: DMA_ERROR waiting for depend_tx\n",
+        if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+            panic("%s: DMA error waiting for depend_tx\n",
                   __func__);
         tx->tx_submit(tx);
     }
@@ -280,8 +280,9 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
          * we are referring to the correct operation
          */
         BUG_ON(async_tx_test_ack(*tx));
-        if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
-            panic("DMA_ERROR waiting for transaction\n");
+        if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+            panic("%s: DMA error waiting for transaction\n",
+                  __func__);
         async_tx_ack(*tx);
         *tx = NULL;
     }
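
Both async_tx.c hunks tighten the same check: the old code panicked only when dma_wait_for_async_tx() returned DMA_ERROR, silently accepting any other non-success status, and the quiesce path did not report which caller hit the problem. After the change, anything other than DMA_SUCCESS is fatal and the message carries __func__. A hedged sketch of the resulting calling convention follows; wait_or_die() is a made-up helper, not kernel code.

#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* Sketch only: dma_wait_for_async_tx() polls the descriptor (and its
 * dependency chain) until the engine reports a final status; anything
 * short of DMA_SUCCESS is treated as unrecoverable here. */
static void wait_or_die(struct dma_async_tx_descriptor *tx)
{
    if (dma_wait_for_async_tx(tx) != DMA_SUCCESS)
        panic("%s: DMA error waiting for transaction\n", __func__);
}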
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 154cc84381c..8ade0a0481c 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -230,9 +230,7 @@ EXPORT_SYMBOL_GPL(async_xor);
 
 static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 {
-    char *a = page_address(p) + offset;
-    return ((*(u32 *) a) == 0 &&
-        memcmp(a, a + 4, len - 4) == 0);
+    return !memchr_inv(page_address(p) + offset, 0, len);
 }
 
 static inline struct dma_chan *
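
The async_xor.c hunk swaps the open-coded zero test for memchr_inv(), which returns NULL when every byte in the range equals the given value and a pointer to the first differing byte otherwise. Besides being shorter, it drops the old code's implicit assumptions that len is at least 4 and that the buffer starts with an aligned u32. A small, hedged illustration of the semantics; buf_is_zero() is an invented name, not part of the patch.

#include <linux/types.h>
#include <linux/string.h>   /* memchr_inv() */

/* Sketch only: non-zero when all len bytes at buf are zero, which is
 * exactly the question page_is_zero() now answers via memchr_inv(). */
static int buf_is_zero(const void *buf, size_t len)
{
    return !memchr_inv(buf, 0, len);
}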