Diffstat (limited to 'patches.tizen/0790-dmaengine-add-dma_slave_get_caps-api.patch')
-rw-r--r--  patches.tizen/0790-dmaengine-add-dma_slave_get_caps-api.patch | 98
1 file changed, 98 insertions(+), 0 deletions(-)
diff --git a/patches.tizen/0790-dmaengine-add-dma_slave_get_caps-api.patch b/patches.tizen/0790-dmaengine-add-dma_slave_get_caps-api.patch
new file mode 100644
index 00000000000..91384d16496
--- /dev/null
+++ b/patches.tizen/0790-dmaengine-add-dma_slave_get_caps-api.patch
@@ -0,0 +1,98 @@
+From 3bc81b2536a636ecf1e6c3a37c24dd28c1b0b3e4 Mon Sep 17 00:00:00 2001
+From: Vinod Koul <vinod.koul@intel.com>
+Date: Mon, 8 Jul 2013 14:15:25 +0530
+Subject: [PATCH 0790/1302] dmaengine: add dma_slave_get_caps api
+
+Add a new device callback, .device_slave_caps, which clients can use to
+query the DMA channel capabilities before they program the channel. This can
+help in removing errors during channel programming. Also add the helper
+dma_get_slave_caps() API.
+
+This patch folds in the work done earlier by Matt:
+https://patchwork.kernel.org/patch/2094891/
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
+---
+ include/linux/dmaengine.h | 44 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 44 insertions(+)
+
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index 96d3e4a..5642335 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -371,6 +371,33 @@ struct dma_slave_config {
+ unsigned int slave_id;
+ };
+
++/* struct dma_slave_caps - expose capabilities of a slave channel only
++ *
++ * @src_addr_widths: bit mask of src addr widths the channel supports
++ * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
++ * @directions: bit mask of slave directions the channel supports
++ * since enum dma_transfer_direction is not defined as bits for each
++ * type of direction, the dma controller should fill (1 << <TYPE>) and
++ * clients should check against the same encoding
++ * @cmd_pause: true if pause (and thereby resume) is supported
++ * @cmd_terminate: true if the terminate command is supported
++ *
++ * @max_sg_nr: maximum number of SG segments supported
++ * 0 for no maximum
++ * @max_sg_len: maximum length of a SG segment supported
++ * 0 for no maximum
++ */
++struct dma_slave_caps {
++ u32 src_addr_widths;
++ u32 dstn_addr_widths;
++ u32 directions;
++ bool cmd_pause;
++ bool cmd_terminate;
++
++ u32 max_sg_nr;
++ u32 max_sg_len;
++};
++
+ static inline const char *dma_chan_name(struct dma_chan *chan)
+ {
+ return dev_name(&chan->dev->device);
+@@ -534,6 +561,7 @@ struct dma_tx_state {
+ * struct with auxiliary transfer status information, otherwise the call
+ * will just return a simple status code
+ * @device_issue_pending: push pending transactions to hardware
++ * @device_slave_caps: return the slave channel capabilities
+ */
+ struct dma_device {
+
+@@ -602,6 +630,7 @@ struct dma_device {
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
+ void (*device_issue_pending)(struct dma_chan *chan);
++ int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
+ };
+
+ static inline int dmaengine_device_control(struct dma_chan *chan,
+@@ -675,6 +704,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+ return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+ }
+
++static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
++{
++ if (!chan || !caps)
++ return -EINVAL;
++
++ /* check if the channel supports slave transactions */
++ if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
++ return -ENXIO;
++
++ if (chan->device->device_slave_caps)
++ return chan->device->device_slave_caps(chan, caps);
++
++ return -ENXIO;
++}
++
+ static inline int dmaengine_terminate_all(struct dma_chan *chan)
+ {
+ return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+--
+1.8.3.2
+
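
Usage note (not part of the patch): below is a minimal, hypothetical client-side sketch of how the new dma_get_slave_caps() helper added above could be used to verify a channel's capabilities before configuring it with dmaengine_slave_config(). The device, the "tx" channel name, the FIFO address and the width/burst settings are illustrative assumptions, not taken from the patch.

/*
 * Hypothetical client sketch: query slave capabilities with the new
 * dma_get_slave_caps() helper before programming the channel.  Device,
 * channel name and FIFO parameters are placeholders.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>

static int example_setup_tx_dma(struct device *dev, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = { };
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_slave_channel(dev, "tx");	/* "tx" is illustrative */
	if (!chan)
		return -ENODEV;

	/* Ask the controller what this slave channel can do. */
	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		goto err;

	/*
	 * Fail early instead of hitting an error during channel programming;
	 * the directions field is encoded as (1 << <dma_transfer_direction>)
	 * per the struct documentation in the patch.
	 */
	if (!(caps.directions & (1 << DMA_MEM_TO_DEV))) {
		ret = -EINVAL;
		goto err;
	}

	cfg.direction      = DMA_MEM_TO_DEV;
	cfg.dst_addr       = fifo_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst   = 4;		/* placeholder burst size */

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err;

	return 0;

err:
	dma_release_channel(chan);
	return ret;
}

On the provider side, a controller driver opts in by implementing the new .device_slave_caps callback and filling struct dma_slave_caps (for example setting directions to (1 << DMA_MEM_TO_DEV) | (1 << DMA_DEV_TO_MEM)); for channels whose driver does not implement the callback, dma_get_slave_caps() returns -ENXIO.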