author    Linus Walleij <linus.walleij@stericsson.com>    2010-03-26 16:44:01 -0700
committer Dan Williams <dan.j.williams@intel.com>         2010-03-26 16:44:01 -0700
commit    c3635c78e500a52c9fcd55de381a72928d9e054d (patch)
tree      87403f402227cd8b5572550e70facf81c9eaa0d9 /drivers/dma
parent    0f65169b1bf44220308e1ce1f6666ad03ddc27af (diff)
DMAENGINE: generic slave control v2
Convert the device_terminate_all() operation on the DMA engine to a generic device_control() operation, which can now optionally also support pausing and resuming DMA on a given channel. Implemented for the COH 901 318 DMAC as an example.

[dan.j.williams@intel.com: update for timberdale]

Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Acked-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Cc: Maciej Sosnowski <maciej.sosnowski@intel.com>
Cc: Nicolas Ferre <nicolas.ferre@atmel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Li Yang <leoli@freescale.com>
Cc: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Cc: Magnus Damm <damm@opensource.se>
Cc: Liam Girdwood <lrg@slimlogic.co.uk>
Cc: Joe Perches <joe@perches.com>
Cc: Roland Dreier <rdreier@cisco.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
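All nine drivers touched below follow the same shape: the old device_terminate_all() callback is replaced by a device_control() callback that takes an enum dma_ctrl_cmd and returns an int, rejecting any command the hardware cannot handle. The following is a minimal sketch of that pattern only; the foo_dma_* names and hardware helpers are hypothetical placeholders, while the callback signature and the DMA_TERMINATE_ALL / DMA_PAUSE / DMA_RESUME command values are the ones used by this patch.

/*
 * Illustrative sketch only, not a real driver.  Everything prefixed
 * with foo_dma_ is a made-up placeholder; the callback signature and
 * the dma_ctrl_cmd values match the interface introduced here.
 */
static int foo_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
	struct foo_dma_chan *fchan = to_foo_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		/* Optional: stop the channel without losing data. */
		foo_dma_hw_pause(fchan);
		return 0;
	case DMA_RESUME:
		/* Optional: restart a previously paused channel. */
		foo_dma_hw_resume(fchan);
		return 0;
	case DMA_TERMINATE_ALL:
		/* Mandatory for DMA_SLAVE devices: halt the channel and
		 * flush all pending and queued descriptors. */
		foo_dma_hw_halt(fchan);
		foo_dma_flush_descriptors(fchan);
		return 0;
	default:
		/* Commands the hardware does not implement are rejected. */
		return -ENXIO;
	}
}

/* Registered at probe time in place of the old hook, e.g.:
 *	ddev->device_control = foo_dma_control;
 */

Of the drivers converted here, only coh901318 actually implements DMA_PAUSE and DMA_RESUME; the others handle DMA_TERMINATE_ALL and return -ENXIO (txx9dmac returns -EINVAL) for anything else.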
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/at_hdmac.c       | 10
-rw-r--r--  drivers/dma/coh901318.c      | 42
-rw-r--r--  drivers/dma/dmaengine.c      |  2
-rw-r--r--  drivers/dma/dw_dmac.c        | 10
-rw-r--r--  drivers/dma/fsldma.c         | 13
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c  | 21
-rw-r--r--  drivers/dma/shdma.c          | 12
-rw-r--r--  drivers/dma/timb_dma.c       |  9
-rw-r--r--  drivers/dma/txx9dmac.c       | 10
9 files changed, 94 insertions(+), 35 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index efc1a61ca23..f9143cf9e50 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -759,13 +759,17 @@ err_desc_get:
return NULL;
}
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -789,6 +793,8 @@ static void atc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
+
+ return 0;
}
/**
@@ -1091,7 +1097,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
- atdma->dma_common.device_terminate_all = atc_terminate_all;
+ atdma->dma_common.device_control = atc_control;
}
dma_writel(atdma, EN, AT_DMA_ENABLE);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f636c4a87c7..53c54e034aa 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -506,10 +506,11 @@ u32 coh901318_get_bytes_left(struct dma_chan *chan)
EXPORT_SYMBOL(coh901318_get_bytes_left);
-/* Stops a transfer without losing data. Enables power save.
- Use this function in conjunction with coh901318_continue(..)
-*/
-void coh901318_stop(struct dma_chan *chan)
+/*
+ * Pauses a transfer without losing data. Enables power save.
+ * Use this function in conjunction with coh901318_resume.
+ */
+static void coh901318_pause(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -550,12 +551,11 @@ void coh901318_stop(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_stop);
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
+/* Resumes a transfer that has been stopped via 300_dma_stop(..).
Power save is handled.
*/
-void coh901318_continue(struct dma_chan *chan)
+static void coh901318_resume(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -581,7 +581,6 @@ void coh901318_continue(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_continue);
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
@@ -945,7 +944,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL);
}
@@ -1179,16 +1178,29 @@ coh901318_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-static void
-coh901318_terminate_all(struct dma_chan *chan)
+static int
+coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
unsigned long flags;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_desc *cohd;
void __iomem *virtbase = cohc->base->virtbase;
- coh901318_stop(chan);
+ if (cmd == DMA_PAUSE) {
+ coh901318_pause(chan);
+ return 0;
+ }
+
+ if (cmd == DMA_RESUME) {
+ coh901318_resume(chan);
+ return 0;
+ }
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+ /* The remainder of this function terminates the transfer */
+ coh901318_pause(chan);
spin_lock_irqsave(&cohc->lock, flags);
/* Clear any pending BE or TC interrupt */
@@ -1227,6 +1239,8 @@ coh901318_terminate_all(struct dma_chan *chan)
cohc->busy = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return 0;
}
void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
@@ -1344,7 +1358,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_terminate_all = coh901318_terminate_all;
+ base->dma_slave.device_control = coh901318_control;
base->dma_slave.dev = &pdev->dev;
err = dma_async_device_register(&base->dma_slave);
@@ -1364,7 +1378,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+ base->dma_memcpy.device_control = coh901318_control;
base->dma_memcpy.dev = &pdev->dev;
/*
* This controller can only access address at even 32bit boundaries,
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 87399cafce3..ffc4ee9c5e2 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -694,7 +694,7 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_terminate_all);
+ !device->device_control);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d28369f7afd..8a6b85f6117 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -781,13 +781,17 @@ err_desc_get:
return NULL;
}
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -810,6 +814,8 @@ static void dwc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);
+
+ return 0;
}
static enum dma_status
@@ -1338,7 +1344,7 @@ static int __init dw_probe(struct platform_device *pdev)
dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
- dw->dma.device_terminate_all = dwc_terminate_all;
+ dw->dma.device_control = dwc_control;
dw->dma.device_is_tx_complete = dwc_is_tx_complete;
dw->dma.device_issue_pending = dwc_issue_pending;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index bbb4be5a3ff..714fc46e769 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -774,13 +774,18 @@ fail:
return NULL;
}
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+ enum dma_ctrl_cmd cmd)
{
struct fsldma_chan *chan;
unsigned long flags;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!dchan)
- return;
+ return -EINVAL;
chan = to_fsl_chan(dchan);
@@ -794,6 +799,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
fsldma_free_desc_list(chan, &chan->ld_running);
spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return 0;
}
/**
@@ -1332,7 +1339,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
fdev->common.device_is_tx_complete = fsl_dma_is_complete;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
- fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+ fdev->common.device_control = fsl_dma_device_control;
fdev->common.dev = &op->dev;
dev_set_drvdata(&op->dev, fdev);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 2a446397c88..39e7fb2a90e 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1472,13 +1472,17 @@ static void idmac_issue_pending(struct dma_chan *chan)
*/
}
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
unsigned long flags;
int i;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
ipu_disable_channel(idmac, ichan,
ichan->status >= IPU_CHANNEL_ENABLED);
@@ -1505,17 +1509,22 @@ static void __idmac_terminate_all(struct dma_chan *chan)
tasklet_enable(&to_ipu(idmac)->tasklet);
ichan->status = IPU_CHANNEL_INITIALIZED;
+
+ return 0;
}
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
+ int ret;
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ ret = __idmac_control(chan, cmd);
mutex_unlock(&ichan->chan_mutex);
+
+ return ret;
}
#ifdef DEBUG
@@ -1607,7 +1616,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ __idmac_control(chan, DMA_TERMINATE_ALL);
if (ichan->status > IPU_CHANNEL_FREE) {
#ifdef DEBUG
@@ -1669,7 +1678,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
/* Compulsory for DMA_SLAVE fields */
dma->device_prep_slave_sg = idmac_prep_slave_sg;
- dma->device_terminate_all = idmac_terminate_all;
+ dma->device_control = idmac_control;
INIT_LIST_HEAD(&dma->channels);
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1712,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
struct idmac_channel *ichan = ipu->channel + i;
- idmac_terminate_all(&ichan->dma_chan);
+ idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL);
idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
}
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 5d17e09cb62..ce28c1e2282 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -580,12 +580,16 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
direction, flags);
}
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!chan)
- return;
+ return -EINVAL;
dmae_halt(sh_chan);
@@ -601,6 +605,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
spin_unlock_bh(&sh_chan->desc_lock);
sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+ return 0;
}
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -1029,7 +1035,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* Compulsory for DMA_SLAVE fields */
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
- shdev->common.device_terminate_all = sh_dmae_terminate_all;
+ shdev->common.device_control = sh_dmae_control;
shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 145f1c23408..7c06471ef86 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -613,7 +613,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
return &td_desc->txd;
}
-static void td_terminate_all(struct dma_chan *chan)
+static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
@@ -621,6 +621,9 @@ static void td_terminate_all(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/* first the easy part, put the queue into the free list */
spin_lock_bh(&td_chan->lock);
list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -630,6 +633,8 @@ static void td_terminate_all(struct dma_chan *chan)
/* now tear down the running */
__td_finish(td_chan);
spin_unlock_bh(&td_chan->lock);
+
+ return 0;
}
static void td_tasklet(unsigned long data)
@@ -743,7 +748,7 @@ static int __devinit td_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
td->dma.device_prep_slave_sg = td_prep_slave_sg;
- td->dma.device_terminate_all = td_terminate_all;
+ td->dma.device_control = td_control;
td->dma.dev = &pdev->dev;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 3ebc61067e5..e528e15f44a 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -938,12 +938,16 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return &first->txd;
}
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
struct txx9dmac_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -EINVAL;
+
dev_vdbg(chan2dev(chan), "terminate_all\n");
spin_lock_bh(&dc->lock);
@@ -958,6 +962,8 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
txx9dmac_descriptor_complete(dc, desc);
+
+ return 0;
}
static enum dma_status
@@ -1153,7 +1159,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
dc->dma.dev = &pdev->dev;
dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
- dc->dma.device_terminate_all = txx9dmac_terminate_all;
+ dc->dma.device_control = txx9dmac_control;
dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
dc->dma.device_issue_pending = txx9dmac_issue_pending;
if (pdata && pdata->memcpy_chan == ch) {