author | Jonas Aaberg <jonas.aberg@stericsson.com> | 2010-06-20 21:25:31 +0000 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2010-06-22 18:01:54 -0700 |
commit | 0d0f6b8bbb13e1287f4f2a271e4f4306789511a2 | |
tree | b1ed6b7293cdb16a658b18ce3640bb2354cdaca3 /drivers/dma | |
parent | 2a6143407d9114a0c5d16a7eed1a0892a4ce9f19 | |
DMAENGINE: ste_dma40: deny ops on non-alloc channels
Added checks to deny operations on non-allocated channels.
Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/ste_dma40.c | 44 |
1 file changed, 44 insertions(+), 0 deletions(-)
```diff
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1d176642e523..4d56d214fa05 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1515,6 +1515,12 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					     chan);
 	unsigned long flags;
 
+	if (d40c->phy_chan == NULL) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Unallocated channel.\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	d40d = d40_desc_get(d40c);
@@ -1710,6 +1716,13 @@ static void d40_free_chan_resources(struct dma_chan *chan)
 	int err;
 	unsigned long flags;
 
+	if (d40c->phy_chan == NULL) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Cannot free unallocated channel\n", __func__);
+		return;
+	}
+
+
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	err = d40_free_dma(d40c);
@@ -1732,6 +1745,12 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	unsigned long flags;
 	int err = 0;
 
+	if (d40c->phy_chan == NULL) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Channel is not allocated.\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	d40d = d40_desc_get(d40c);
@@ -1947,6 +1966,12 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 	unsigned long flags;
 	int err;
 
+	if (d40c->phy_chan == NULL) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Cannot prepare unallocated channel\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (d40c->dma_cfg.pre_transfer)
 		d40c->dma_cfg.pre_transfer(chan,
 					   d40c->dma_cfg.pre_transfer_data,
@@ -1993,6 +2018,13 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
 	dma_cookie_t last_complete;
 	int ret;
 
+	if (d40c->phy_chan == NULL) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Cannot read status of unallocated channel\n",
+			__func__);
+		return -EINVAL;
+	}
+
 	last_complete = d40c->completed;
 	last_used = chan->cookie;
 
@@ -2012,6 +2044,12 @@ static void d40_issue_pending(struct dma_chan *chan)
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 	unsigned long flags;
 
+	if (d40c->phy_chan == NULL) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Channel is not allocated!\n", __func__);
+		return;
+	}
+
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	/* Busy means that pending jobs are already being processed */
@@ -2027,6 +2065,12 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	unsigned long flags;
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 
+	if (d40c->phy_chan == NULL) {
+		dev_err(&d40c->chan.dev->device,
+			"[%s] Channel is not allocated!\n", __func__);
+		return -EINVAL;
+	}
+
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
 		spin_lock_irqsave(&d40c->lock, flags);
```
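Editor's note: every guard added above has the same shape. Before touching any channel state, the operation checks whether a physical channel actually backs the logical one (`d40c->phy_chan != NULL`); if not, it logs via `dev_err()` and fails early with `-EINVAL` (or `ERR_PTR(-EINVAL)` from the descriptor-prep paths) instead of dereferencing an unallocated channel deeper in the call chain. The sketch below is a minimal, compilable userspace analogue of that pattern, not driver code: `struct chan_like`, `issue_pending_like()` and the `fprintf()` reporting are hypothetical stand-ins for `struct d40_chan`, `d40_issue_pending()` and `dev_err()`.

```c
/* Hypothetical userspace analogue of the "deny ops on non-alloc channels"
 * guard; names and error reporting are simplified stand-ins, not ste_dma40. */
#include <errno.h>
#include <stdio.h>

struct phy_channel { int id; };

struct chan_like {
	struct phy_channel *phy_chan;	/* NULL until a physical channel is allocated */
	const char *name;
};

static int issue_pending_like(struct chan_like *c)
{
	/* Same shape as the guard in the patch: reject the operation
	 * before touching any channel state. */
	if (c->phy_chan == NULL) {
		fprintf(stderr, "[%s] Channel is not allocated!\n", __func__);
		return -EINVAL;
	}
	printf("[%s] %s: processing pending jobs\n", __func__, c->name);
	return 0;
}

int main(void)
{
	struct phy_channel phy = { 3 };
	struct chan_like unallocated = { NULL, "chan A" };
	struct chan_like allocated = { &phy, "chan B" };

	/* Denied: no physical channel behind the logical one. */
	if (issue_pending_like(&unallocated) == -EINVAL)
		printf("operation on unallocated channel was rejected\n");

	/* Allowed: physical channel present. */
	issue_pending_like(&allocated);
	return 0;
}
```

The value of checking at the entry points is that a caller which skipped or failed channel allocation now gets a clear log message and an error code, rather than a NULL dereference inside the operation.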