author	Huang Shijie <b32955@freescale.com>	2012-02-16 14:17:33 +0800
committer	David Woodhouse <David.Woodhouse@intel.com>	2012-03-27 00:37:28 +0100
commit	921de864b7c6413f15224d8f5e677541e8e1ac6d (patch)
tree	325815e4a65a26b961796314fdb0b2cd6e0b9975 /drivers/dma
parent	3946860409130038ef6e0e5c50f2203053eae2b7 (diff)
mxs-dma : rewrite the last parameter of mxs_dma_prep_slave_sg()
[1] Background :
    The GPMI does the ECC read page operation with a DMA chain consisting
    of three DMA Command Structures. The middle one of the chain is used
    to enable the BCH and read out the NAND page.

    The WAIT4END (wait for command end) is a communication signal between
    the GPMI and MXS-DMA.

[2] The current DMA code sets the WAIT4END bit only at the last one, such as:

    +-----+               +-----+                   +-----+
    | cmd | ------------> | cmd | ----------------> | cmd |
    +-----+               +-----+                   +-----+
                                                       ^
                                                       |
                                                       |
                                              set WAIT4END here

    This chain works fine on the mx23/mx28.

[3] But in the new GPMI version (used in MX50/MX60), the WAIT4END bit should
    be set not only at the last DMA Command Structure, but also at the middle
    one, such as:

    +-----+               +-----+                   +-----+
    | cmd | ------------> | cmd | ----------------> | cmd |
    +-----+               +-----+                   +-----+
                             ^                         ^
                             |                         |
                             |                         |
                   set WAIT4END here too      set WAIT4END here

    If we do not set WAIT4END, the BCH may stall in the "ECC reading page"
    state. In the next ECC write page operation, a DMA timeout occurs.
    This has been caught on the MX6Q board.

[4] In order to fix the bug, rewrite the last parameter of
    mxs_dma_prep_slave_sg(), and use the dma_ctrl_flags:
    ---------------------------------------------------------
    DMA_PREP_INTERRUPT : append a new DMA Command Structure.
    DMA_CTRL_ACK       : set the WAIT4END bit for this DMA Command Structure.
    ---------------------------------------------------------

[5] Changes to the related drivers:
    <1> For the mxs-mmc driver, just use the new flags; do not change any logic.
    <2> For the gpmi-nand driver, use the new flags to set up the DMA chain,
        especially for the ECC read page.

Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Huang Shijie <b32955@freescale.com>
Acked-by: Vinod Koul <vinod.koul@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
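As an illustration of the new flag semantics, the sketch below builds a
three-descriptor chain of the shape described in [3] through the generic
dmaengine_prep_slave_sg() wrapper. It is only a sketch under assumptions:
the function and the names (example_ecc_read_chain, sg_cmd, sg_data,
sg_done, data_sgs) are hypothetical and are not taken from the in-tree
gpmi-nand code; the point is which flags each prep call passes.

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    /* Hypothetical caller; names are illustrative, not the gpmi-nand code. */
    static int example_ecc_read_chain(struct dma_chan *chan,
                                      struct scatterlist *sg_cmd,
                                      struct scatterlist *sg_data,
                                      unsigned int data_sgs,
                                      struct scatterlist *sg_done)
    {
            struct dma_async_tx_descriptor *desc;

            /* First DMA Command Structure: starts a new chain, no WAIT4END. */
            desc = dmaengine_prep_slave_sg(chan, sg_cmd, 1, DMA_MEM_TO_DEV, 0);
            if (!desc)
                    return -EINVAL;

            /*
             * Middle DMA Command Structure: DMA_PREP_INTERRUPT appends it to
             * the chain; DMA_CTRL_ACK asks mxs-dma to set WAIT4END here too,
             * which the newer GPMI needs for the ECC page read.
             */
            desc = dmaengine_prep_slave_sg(chan, sg_data, data_sgs,
                                           DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EINVAL;

            /* Last DMA Command Structure: appended, WAIT4END set, IRQ fires
             * on completion. */
            desc = dmaengine_prep_slave_sg(chan, sg_done, 1, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EINVAL;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }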
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/mxs-dma.c	32
1 file changed, 28 insertions, 4 deletions
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 0afcedbe247..0ddfd30b56a 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -349,10 +349,32 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
clk_disable_unprepare(mxs_dma->clk);
}
+/*
+ * How to use the flags for ->device_prep_slave_sg() :
+ * [1] If there is only one DMA command in the DMA chain, the code should be:
+ * ......
+ * ->device_prep_slave_sg(DMA_CTRL_ACK);
+ * ......
+ * [2] If there are two DMA commands in the DMA chain, the code should be
+ * ......
+ * ->device_prep_slave_sg(0);
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ * ......
+ * [3] If there are more than two DMA commands in the DMA chain, the code
+ * should be:
+ * ......
+ * ->device_prep_slave_sg(0); // First
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ * ......
+ */
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long append)
+ unsigned long flags)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -360,6 +382,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
int i, j;
u32 *pio;
+ bool append = flags & DMA_PREP_INTERRUPT;
int idx = append ? mxs_chan->desc_count : 0;
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
@@ -386,7 +409,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= CCW_CHAIN;
ccw->bits &= ~CCW_IRQ;
ccw->bits &= ~CCW_DEC_SEM;
- ccw->bits &= ~CCW_WAIT4END;
} else {
idx = 0;
}
@@ -401,7 +423,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits = 0;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- ccw->bits |= CCW_WAIT4END;
+ if (flags & DMA_CTRL_ACK)
+ ccw->bits |= CCW_WAIT4END;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= BF_CCW(sg_len, PIO_NUM);
@@ -432,7 +455,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits &= ~CCW_CHAIN;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- ccw->bits |= CCW_WAIT4END;
+ if (flags & DMA_CTRL_ACK)
+ ccw->bits |= CCW_WAIT4END;
}
}
}
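Read together with the comment added above, the two-command case ([2]),
which is what the mxs-mmc driver ends up using, can be sketched the same
way. Again the names (example_two_cmd_chain, sg_pio, sg_buf, nents) are
placeholders, not the real mxs-mmc code, and the includes from the earlier
sketch apply.

    /* Hypothetical two-command chain, matching pattern [2] in the comment. */
    static int example_two_cmd_chain(struct dma_chan *chan,
                                     struct scatterlist *sg_pio,
                                     struct scatterlist *sg_buf,
                                     unsigned int nents)
    {
            struct dma_async_tx_descriptor *desc;

            /* First command: starts a new chain, WAIT4END not requested. */
            desc = dmaengine_prep_slave_sg(chan, sg_pio, 1, DMA_MEM_TO_DEV, 0);
            if (!desc)
                    return -EINVAL;

            /* Second and last command: append it and set WAIT4END. */
            desc = dmaengine_prep_slave_sg(chan, sg_buf, nents, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EINVAL;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }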