author		Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 12:03:18 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 12:03:18 -0700
commit		5554b35933245e95710d709175e14c02cbc956a4 (patch)
tree		2eeb2f05a7061da3c9a3bc9ea69a344b990c6b49 /crypto/async_tx
parent		0f6e38a6381446eff5175b77d1094834a633a90f (diff)
parent		7f1b358a236ee9c19657a619ac6f2dcabcaa0924 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (24 commits)
  I/OAT: I/OAT version 3.0 support
  I/OAT: tcp_dma_copybreak default value dependent on I/OAT version
  I/OAT: Add watchdog/reset functionality to ioatdma
  iop_adma: cleanup iop_chan_xor_slot_count
  iop_adma: document how to calculate the minimum descriptor pool size
  iop_adma: directly reclaim descriptors on allocation failure
  async_tx: make async_tx_test_ack a boolean routine
  async_tx: remove depend_tx from async_tx_sync_epilog
  async_tx: export async_tx_quiesce
  async_tx: fix handling of the "out of descriptor" condition in async_xor
  async_tx: ensure the xor destination buffer remains dma-mapped
  async_tx: list_for_each_entry_rcu() cleanup
  dmaengine: Driver for the Synopsys DesignWare DMA controller
  dmaengine: Add slave DMA interface
  dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap
  dmaengine: Add dma_client parameter to device_alloc_chan_resources
  dmatest: Simple DMA memcpy test client
  dmaengine: DMA engine driver for Marvell XOR engine
  iop-adma: fix platform driver hotplug/coldplug
  dmaengine: track the number of clients using a channel
  ...

Fixed up conflict in drivers/dca/dca-sysfs.c manually
Diffstat (limited to 'crypto/async_tx')
-rw-r--r--	crypto/async_tx/async_memcpy.c	12
-rw-r--r--	crypto/async_tx/async_memset.c	12
-rw-r--r--	crypto/async_tx/async_tx.c	33
-rw-r--r--	crypto/async_tx/async_xor.c	262
4 files changed, 141 insertions(+), 178 deletions(-)
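The change repeated across these files is the replacement of an open-coded
dependency wait with the newly exported async_tx_quiesce() helper. A minimal
before/after sketch of the pattern, taken from the hunks that follow (no new
API is assumed); note the helper also acks the descriptor and clears the
caller's pointer, which the open-coded version did not:

	/* before: every call site spelled out the wait on its dependency */
	if (depend_tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(depend_tx));
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
	}

	/* after: one helper waits, acks, and NULLs the pointer */
	async_tx_quiesce(&depend_tx);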
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index a5eda80e8427..ddccfb01c416 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
pr_debug("%s: (sync) len: %zu\n", __func__, len);
/* wait for any prerequisite operations */
- if (depend_tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(async_tx_test_ack(depend_tx));
- if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for depend_tx\n",
- __func__);
- }
+ async_tx_quiesce(&depend_tx);
dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -91,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1);
- async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+ async_tx_sync_epilog(cb_fn, cb_param);
}
return tx;
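For context, a hedged sketch of how a client drives this interface after the
series; the completion-based callback and the wrapper function here are
hypothetical, not taken from the patch:

	/* hypothetical caller: copy a page, wake a waiter when it is done */
	static void copy_done(void *param)
	{
		complete((struct completion *)param);
	}

	static void copy_one_page(struct page *dest_page, struct page *src_page)
	{
		struct completion done;

		init_completion(&done);
		async_memcpy(dest_page, src_page, 0, 0, PAGE_SIZE,
			     ASYNC_TX_ACK, NULL /* no dependency */,
			     copy_done, &done);
		async_tx_issue_pending_all();
		wait_for_completion(&done);
	}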
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index f5ff3906b035..5b5eb99bb244 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -72,19 +72,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
dest_buf = (void *) (((char *) page_address(dest)) + offset);
/* wait for any prerequisite operations */
- if (depend_tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(depend_tx->ack);
- if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for depend_tx\n",
- __func__);
- }
+ async_tx_quiesce(&depend_tx);
memset(dest_buf, val, len);
- async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+ async_tx_sync_epilog(cb_fn, cb_param);
}
return tx;
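Both converted sites now call the two-argument async_tx_sync_epilog(); per
the "remove depend_tx from async_tx_sync_epilog" commit in the list above,
the epilog reduces to invoking the completion callback. A sketch of what the
new signature implies (paraphrased from the header change, not quoted from
this diff):

	/* sync path epilog: just run the caller's callback, if any */
	static inline void
	async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_param)
	{
		if (cb_fn)
			cb_fn(cb_param);
	}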
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 095c798d3170..85eaf7b1c531 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -295,7 +295,7 @@ dma_channel_add_remove(struct dma_client *client,
case DMA_RESOURCE_REMOVED:
found = 0;
spin_lock_irqsave(&async_tx_lock, flags);
- list_for_each_entry_rcu(ref, &async_tx_master_list, node)
+ list_for_each_entry(ref, &async_tx_master_list, node)
if (ref->chan == chan) {
/* permit backing devices to go away */
dma_chan_put(ref->chan);
@@ -608,23 +608,34 @@ async_trigger_callback(enum async_tx_flags flags,
pr_debug("%s: (sync)\n", __func__);
/* wait for any prerequisite operations */
- if (depend_tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(async_tx_test_ack(depend_tx));
- if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for depend_tx\n",
- __func__);
- }
+ async_tx_quiesce(&depend_tx);
- async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+ async_tx_sync_epilog(cb_fn, cb_param);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+ if (*tx) {
+ /* if ack is already set then we cannot be sure
+ * we are referring to the correct operation
+ */
+ BUG_ON(async_tx_test_ack(*tx));
+ if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+ panic("DMA_ERROR waiting for transaction\n");
+ async_tx_ack(*tx);
+ *tx = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
+
module_init(async_tx_init);
module_exit(async_tx_exit);
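The helper exported above is intended for exactly the synchronous fallback
paths shown in this series; a minimal sketch of such a fallback built on it
(the function and buffer names are illustrative only):

	/* fall back to the CPU: quiesce the chain, then touch the memory */
	static void zero_buf_sync(struct dma_async_tx_descriptor *depend_tx,
				  void *buf, size_t len)
	{
		/* waits for depend_tx, acks it, and sets it to NULL */
		async_tx_quiesce(&depend_tx);
		memset(buf, 0, len);
	}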
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3a0dddca5a10..65974c6d3d7a 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -35,74 +35,121 @@
* when CONFIG_DMA_ENGINE=n
*/
static __always_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_device *device,
- struct dma_chan *chan, struct page *dest, struct page **src_list,
- unsigned int offset, unsigned int src_cnt, size_t len,
- enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
+ unsigned int offset, int src_cnt, size_t len,
+ enum async_tx_flags flags,
+ struct dma_async_tx_descriptor *depend_tx,
+ dma_async_tx_callback cb_fn, void *cb_param)
{
- dma_addr_t dma_dest;
+ struct dma_device *dma = chan->device;
dma_addr_t *dma_src = (dma_addr_t *) src_list;
- struct dma_async_tx_descriptor *tx;
+ struct dma_async_tx_descriptor *tx = NULL;
+ int src_off = 0;
int i;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
-
- pr_debug("%s: len: %zu\n", __func__, len);
-
- dma_dest = dma_map_page(device->dev, dest, offset, len,
- DMA_FROM_DEVICE);
+ dma_async_tx_callback _cb_fn;
+ void *_cb_param;
+ enum async_tx_flags async_flags;
+ enum dma_ctrl_flags dma_flags;
+ int xor_src_cnt;
+ dma_addr_t dma_dest;
+ dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
for (i = 0; i < src_cnt; i++)
- dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+ dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
len, DMA_TO_DEVICE);
- /* Since we have clobbered the src_list we are committed
- * to doing this asynchronously. Drivers force forward progress
- * in case they can not provide a descriptor
- */
- tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
- dma_prep_flags);
- if (!tx) {
- if (depend_tx)
- dma_wait_for_async_tx(depend_tx);
-
- while (!tx)
- tx = device->device_prep_dma_xor(chan, dma_dest,
- dma_src, src_cnt, len,
- dma_prep_flags);
- }
+ while (src_cnt) {
+ async_flags = flags;
+ dma_flags = 0;
+ xor_src_cnt = min(src_cnt, dma->max_xor);
+ /* if we are submitting additional xors, leave the chain open,
+ * clear the callback parameters, and leave the destination
+ * buffer mapped
+ */
+ if (src_cnt > xor_src_cnt) {
+ async_flags &= ~ASYNC_TX_ACK;
+ dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
+ _cb_fn = NULL;
+ _cb_param = NULL;
+ } else {
+ _cb_fn = cb_fn;
+ _cb_param = cb_param;
+ }
+ if (_cb_fn)
+ dma_flags |= DMA_PREP_INTERRUPT;
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ /* Since we have clobbered the src_list we are committed
+ * to doing this asynchronously. Drivers force forward progress
+ * in case they can not provide a descriptor
+ */
+ tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
+ xor_src_cnt, len, dma_flags);
+
+ if (unlikely(!tx))
+ async_tx_quiesce(&depend_tx);
+
+ /* spin wait for the preceding transactions to complete */
+ while (unlikely(!tx)) {
+ dma_async_issue_pending(chan);
+ tx = dma->device_prep_dma_xor(chan, dma_dest,
+ &dma_src[src_off],
+ xor_src_cnt, len,
+ dma_flags);
+ }
+
+ async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
+ _cb_param);
+
+ depend_tx = tx;
+ flags |= ASYNC_TX_DEP_ACK;
+
+ if (src_cnt > xor_src_cnt) {
+ /* drop completed sources */
+ src_cnt -= xor_src_cnt;
+ src_off += xor_src_cnt;
+
+ /* use the intermediate result as a source */
+ dma_src[--src_off] = dma_dest;
+ src_cnt++;
+ } else
+ break;
+ }
return tx;
}
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
- unsigned int src_cnt, size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ int src_cnt, size_t len, enum async_tx_flags flags,
+ dma_async_tx_callback cb_fn, void *cb_param)
{
- void *_dest;
int i;
-
- pr_debug("%s: len: %zu\n", __func__, len);
+ int xor_src_cnt;
+ int src_off = 0;
+ void *dest_buf;
+ void **srcs = (void **) src_list;
/* reuse the 'src_list' array to convert to buffer pointers */
for (i = 0; i < src_cnt; i++)
- src_list[i] = (struct page *)
- (page_address(src_list[i]) + offset);
+ srcs[i] = page_address(src_list[i]) + offset;
/* set destination address */
- _dest = page_address(dest) + offset;
+ dest_buf = page_address(dest) + offset;
if (flags & ASYNC_TX_XOR_ZERO_DST)
- memset(_dest, 0, len);
+ memset(dest_buf, 0, len);
- xor_blocks(src_cnt, len, _dest,
- (void **) src_list);
+ while (src_cnt > 0) {
+ /* process up to 'MAX_XOR_BLOCKS' sources */
+ xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
+ xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);
- async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+ /* drop completed sources */
+ src_cnt -= xor_src_cnt;
+ src_off += xor_src_cnt;
+ }
+
+ async_tx_sync_epilog(cb_fn, cb_param);
}
/**
@@ -132,106 +179,34 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
- struct dma_device *device = chan ? chan->device : NULL;
- struct dma_async_tx_descriptor *tx = NULL;
- dma_async_tx_callback _cb_fn;
- void *_cb_param;
- unsigned long local_flags;
- int xor_src_cnt;
- int i = 0, src_off = 0;
-
BUG_ON(src_cnt <= 1);
- while (src_cnt) {
- local_flags = flags;
- if (device) { /* run the xor asynchronously */
- xor_src_cnt = min(src_cnt, device->max_xor);
- /* if we are submitting additional xors
- * only set the callback on the last transaction
- */
- if (src_cnt > xor_src_cnt) {
- local_flags &= ~ASYNC_TX_ACK;
- _cb_fn = NULL;
- _cb_param = NULL;
- } else {
- _cb_fn = cb_fn;
- _cb_param = cb_param;
- }
-
- tx = do_async_xor(device, chan, dest,
- &src_list[src_off], offset,
- xor_src_cnt, len, local_flags,
- depend_tx, _cb_fn, _cb_param);
- } else { /* run the xor synchronously */
- /* in the sync case the dest is an implied source
- * (assumes the dest is at the src_off index)
- */
- if (flags & ASYNC_TX_XOR_DROP_DST) {
- src_cnt--;
- src_off++;
- }
-
- /* process up to 'MAX_XOR_BLOCKS' sources */
- xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
-
- /* if we are submitting additional xors
- * only set the callback on the last transaction
- */
- if (src_cnt > xor_src_cnt) {
- local_flags &= ~ASYNC_TX_ACK;
- _cb_fn = NULL;
- _cb_param = NULL;
- } else {
- _cb_fn = cb_fn;
- _cb_param = cb_param;
- }
-
- /* wait for any prerequisite operations */
- if (depend_tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(async_tx_test_ack(depend_tx));
- if (dma_wait_for_async_tx(depend_tx) ==
- DMA_ERROR)
- panic("%s: DMA_ERROR waiting for "
- "depend_tx\n",
- __func__);
- }
-
- do_sync_xor(dest, &src_list[src_off], offset,
- xor_src_cnt, len, local_flags, depend_tx,
- _cb_fn, _cb_param);
- }
+ if (chan) {
+ /* run the xor asynchronously */
+ pr_debug("%s (async): len: %zu\n", __func__, len);
- /* the previous tx is hidden from the client,
- * so ack it
- */
- if (i && depend_tx)
- async_tx_ack(depend_tx);
+ return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
+ flags, depend_tx, cb_fn, cb_param);
+ } else {
+ /* run the xor synchronously */
+ pr_debug("%s (sync): len: %zu\n", __func__, len);
- depend_tx = tx;
+ /* in the sync case the dest is an implied source
+ * (assumes the dest is the first source)
+ */
+ if (flags & ASYNC_TX_XOR_DROP_DST) {
+ src_cnt--;
+ src_list++;
+ }
- if (src_cnt > xor_src_cnt) {
- /* drop completed sources */
- src_cnt -= xor_src_cnt;
- src_off += xor_src_cnt;
+ /* wait for any prerequisite operations */
+ async_tx_quiesce(&depend_tx);
- /* unconditionally preserve the destination */
- flags &= ~ASYNC_TX_XOR_ZERO_DST;
+ do_sync_xor(dest, src_list, offset, src_cnt, len,
+ flags, cb_fn, cb_param);
- /* use the intermediate result a source, but remember
- * it's dropped, because it's implied, in the sync case
- */
- src_list[--src_off] = dest;
- src_cnt++;
- flags |= ASYNC_TX_XOR_DROP_DST;
- } else
- src_cnt = 0;
- i++;
+ return NULL;
}
-
- return tx;
}
EXPORT_SYMBOL_GPL(async_xor);
@@ -285,11 +260,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
len, result,
dma_prep_flags);
- if (!tx) {
- if (depend_tx)
- dma_wait_for_async_tx(depend_tx);
+ if (unlikely(!tx)) {
+ async_tx_quiesce(&depend_tx);
-	while (!tx)
+	while (!tx) {
+		dma_async_issue_pending(chan);
	tx = device->device_prep_dma_zero_sum(chan,
		dma_src, src_cnt, len, result,
		dma_prep_flags);
+	}
@@ -307,18 +282,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
depend_tx, NULL, NULL);
- if (tx) {
- if (dma_wait_for_async_tx(tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for tx\n",
- __func__);
- async_tx_ack(tx);
- }
+ async_tx_quiesce(&tx);
*result = page_is_zero(dest, offset, len) ? 0 : 1;
- tx = NULL;
-
- async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+ async_tx_sync_epilog(cb_fn, cb_param);
}
return tx;
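One subtlety in the reworked do_async_xor() above is the index bookkeeping
when src_cnt exceeds the channel's max_xor: after each pass the destination
is re-mapped as a source by stepping src_off back one slot. A small
userspace model of just that arithmetic (hypothetical values; the kernel
loop is the authority):

	#include <stdio.h>

	int main(void)
	{
		int src_cnt = 12, src_off = 0, max_xor = 8;

		while (src_cnt) {
			int xor_src_cnt = src_cnt < max_xor ? src_cnt : max_xor;

			printf("xor %d sources starting at index %d\n",
			       xor_src_cnt, src_off);
			if (src_cnt <= xor_src_cnt)
				break;
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;
			/* reuse the intermediate result as a source:
			 * dma_src[--src_off] = dma_dest in the kernel
			 */
			src_off--;
			src_cnt++;
		}
		/* prints: 8 sources at index 0, then 5 sources at index 7 */
		return 0;
	}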