From c5a9f9d0895b2c16908979244d3d678fd6db0545 Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Fri, 18 Feb 2011 10:01:20 +0530 Subject: pch_dma: fix kernel warning

Fix the following kernel warning. It triggers because pd_prep_slave_sg() and friends take the channel lock with the _bh spinlock variants while running in hard interrupt context (pch_uart_interrupt() in the trace below), where re-enabling bottom halves is not allowed. Switch those paths to plain spin_lock()/spin_unlock() and make the tasklet use spin_lock_irqsave().

------------[ cut here ]------------ WARNING: at kernel/softirq.c:159 _local_bh_enable_ip.clone.5+0x35/0x71() Hardware name: To be filled by O.E.M. Modules linked in: pch_uart pch_dma fuse mga drm cpufreq_ondemand acpi_cpufreq mperf ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables ipv6 uinput snd_hda_codec_realtek snd_hda_intel snd_hda_codec matroxfb_base snd_hwdep 8250_pnp snd_seq snd_seq_device matroxfb_DAC1064 snd_pcm joydev 8250 matroxfb_accel snd_timer matroxfb_Ti3026 ppdev pegasus parport_pc snd parport matroxfb_g450 g450_pll serial_core video output matroxfb_misc soundcore snd_page_alloc serio_raw pcspkr ext4 jbd2 crc16 sdhci_pci sdhci mmc_core floppy [last unloaded: scsi_wait_scan] Pid: 0, comm: swapper Not tainted 2.6.37.upstream_check+ #8 Call Trace: [] warn_slowpath_common+0x65/0x7a [] ? _local_bh_enable_ip.clone.5+0x35/0x71 [] warn_slowpath_null+0xf/0x13 [] _local_bh_enable_ip.clone.5+0x35/0x71 [] local_bh_enable_ip+0x8/0xa [] _raw_spin_unlock_bh+0x10/0x12 [] pd_prep_slave_sg+0xba/0x200 [pch_dma] [] pch_uart_interrupt+0x44d/0x6aa [pch_uart] [] handle_IRQ_event+0x1d/0x9e [] handle_fasteoi_irq+0x90/0xc7 [] ? handle_fasteoi_irq+0x0/0xc7 [] ? do_IRQ+0x3e/0x89 [] ? common_interrupt+0x29/0x30 [] ? sys_getpriority+0x12d/0x1a2 [] ? arch_local_irq_enable+0x5/0xb [] ? acpi_idle_enter_bm+0x22a/0x261 [] ? cpuidle_idle_call+0x70/0xa1 [] ? cpu_idle+0x49/0x6a [] ? rest_init+0x58/0x5a [] ? start_kernel+0x2d0/0x2d5 [] ? i386_start_kernel+0xce/0xd5

Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'drivers')

diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 1c38418ae61..bf2ddd601dc 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); dma_cookie_t cookie; - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); cookie = pdc_assign_cookie(pd_chan, desc); if (list_empty(&pd_chan->active_list)) { @@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) list_add_tail(&desc->desc_node, &pd_chan->queue); } - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); return 0; } @@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) struct pch_dma *pd = to_pd(chan->device); dma_addr_t addr; - desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr); + desc = pci_pool_alloc(pd->pool, flags, &addr); if (desc) { memset(desc, 0, sizeof(struct pch_dma_desc)); INIT_LIST_HEAD(&desc->tx_list); @@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) struct pch_dma_desc *ret = NULL; int i; - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { i++; if (async_tx_test_ack(&desc->txd)) { @@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) } dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); } - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); if (!ret) { ret =
pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); if (ret) { - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); pd_chan->descs_allocated++; - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); } else { dev_err(chan2dev(&pd_chan->chan), "failed to alloc desc\n"); @@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc) { if (desc) { - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); list_splice_init(&desc->tx_list, &pd_chan->free_list); list_add(&desc->desc_node, &pd_chan->free_list); - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); } } @@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan) struct pch_dma_chan *pd_chan = to_pd_chan(chan); if (pdc_is_idle(pd_chan)) { - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); pdc_advance_work(pd_chan); - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); } } @@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, goto err_desc_get; } - if (!first) { first = desc; } else { @@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock_bh(&pd_chan->lock); - return 0; } static void pdc_tasklet(unsigned long data) { struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; + unsigned long flags; if (!pdc_is_idle(pd_chan)) { dev_err(chan2dev(&pd_chan->chan), @@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data) return; } - spin_lock_bh(&pd_chan->lock); + spin_lock_irqsave(&pd_chan->lock, flags); if (test_and_clear_bit(0, &pd_chan->err_status)) pdc_handle_error(pd_chan); else pdc_advance_work(pd_chan); - spin_unlock_bh(&pd_chan->lock); + spin_unlock_irqrestore(&pd_chan->lock, flags); } static irqreturn_t pd_irq(int irq, void *devid) -- cgit v1.2.3

From 26d890f0d09fd58f7194aad651e86283cb9e6574 Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Fri, 18 Feb 2011 10:01:21 +0530 Subject: pch_dma: set the array sizes correctly

Set the sizes of the desc and channels arrays correctly, using MAX_CHAN_NR instead of zero-length arrays.

Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers')

diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index bf2ddd601dc..b1dfba7e37b 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -82,7 +82,7 @@ struct pch_dma_regs { u32 dma_sts1; u32 reserved2; u32 reserved3; - struct pch_dma_desc_regs desc[0]; + struct pch_dma_desc_regs desc[MAX_CHAN_NR]; }; struct pch_dma_desc { @@ -124,7 +124,7 @@ struct pch_dma { struct pci_pool *pool; struct pch_dma_regs regs; struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; - struct pch_dma_chan channels[0]; + struct pch_dma_chan channels[MAX_CHAN_NR]; }; #define PCH_DMA_CTL0 0x00 -- cgit v1.2.3

From a580b8c5429a624d120cd603e1498bf676e2b4da Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Sun, 27 Feb 2011 00:47:42 +0800 Subject: dmaengine: mxs-dma: add dma support for i.MX23/28

This patch adds dma support for Freescale MXS-based SoCs i.MX23/28, including apbh-dma and apbx-dma. * apbh-dma and apbx-dma are supported in the driver as two mxs-dma instances. * apbh-dma is different between mx23 and mx28; the hardware version register is used to differentiate them. * mxs-dma supports a pio function besides data transfer. The driver uses dma_data_direction DMA_NONE to identify the pio mode, and steals sgl and sg_len to get pio words and numbers from clients.
* mxs dmaengine has some very specific features, like the sense function and the special NAND support (nand_lock, nand_wait4ready). These are too specific to be implemented in a generic dmaengine driver. * The driver is modeled on imx-sdma; only a single descriptor is statically assigned to each channel.

Signed-off-by: Shawn Guo Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 8 + drivers/dma/Makefile | 1 + drivers/dma/mxs-dma.c | 724 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 733 insertions(+) create mode 100644 drivers/dma/mxs-dma.c (limited to 'drivers')

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1c28816152f..76f6472ba31 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -227,6 +227,14 @@ config IMX_DMA Support the i.MX DMA engine. This engine is integrated into Freescale i.MX1/21/27 chips. +config MXS_DMA + bool "MXS DMA support" + depends on SOC_IMX23 || SOC_IMX28 + select DMA_ENGINE + help + Support the MXS DMA engine. This engine including APBH-DMA + and APBX-DMA is integrated into Freescale i.MX23/28 chips. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 64b21f5cd74..802b55795f8 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o +obj-$(CONFIG_MXS_DMA) += mxs-dma.o obj-$(CONFIG_TIMB_DMA) += timb_dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_PL330_DMA) += pl330.o diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c new file mode 100644 index 00000000000..88aad4f5400 --- /dev/null +++ b/drivers/dma/mxs-dma.c @@ -0,0 +1,724 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * Refer to drivers/dma/imx-sdma.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * NOTE: The term "PIO" throughout the mxs-dma implementation means + * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, + * dma can program the controller registers of peripheral devices. + */ + +#define MXS_DMA_APBH 0 +#define MXS_DMA_APBX 1 +#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH) + +#define APBH_VERSION_LATEST 3 +#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST) + +#define HW_APBHX_CTRL0 0x000 +#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) +#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) +#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 +#define BP_APBH_CTRL0_RESET_CHANNEL 16 +#define HW_APBHX_CTRL1 0x010 +#define HW_APBHX_CTRL2 0x020 +#define HW_APBHX_CHANNEL_CTRL 0x030 +#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 +#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800) +#define HW_APBX_VERSION 0x800 +#define BP_APBHX_VERSION_MAJOR 24 +#define HW_APBHX_CHn_NXTCMDAR(n) \ + (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70) +#define HW_APBHX_CHn_SEMA(n) \ + (((dma_is_apbh() && apbh_is_old()) ?
0x080 : 0x140) + (n) * 0x70) + +/* + * ccw bits definitions + * + * COMMAND: 0..1 (2) + * CHAIN: 2 (1) + * IRQ: 3 (1) + * NAND_LOCK: 4 (1) - not implemented + * NAND_WAIT4READY: 5 (1) - not implemented + * DEC_SEM: 6 (1) + * WAIT4END: 7 (1) + * HALT_ON_TERMINATE: 8 (1) + * TERMINATE_FLUSH: 9 (1) + * RESERVED: 10..11 (2) + * PIO_NUM: 12..15 (4) + */ +#define BP_CCW_COMMAND 0 +#define BM_CCW_COMMAND (3 << 0) +#define CCW_CHAIN (1 << 2) +#define CCW_IRQ (1 << 3) +#define CCW_DEC_SEM (1 << 6) +#define CCW_WAIT4END (1 << 7) +#define CCW_HALT_ON_TERM (1 << 8) +#define CCW_TERM_FLUSH (1 << 9) +#define BP_CCW_PIO_NUM 12 +#define BM_CCW_PIO_NUM (0xf << 12) + +#define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field) + +#define MXS_DMA_CMD_NO_XFER 0 +#define MXS_DMA_CMD_WRITE 1 +#define MXS_DMA_CMD_READ 2 +#define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */ + +struct mxs_dma_ccw { + u32 next; + u16 bits; + u16 xfer_bytes; +#define MAX_XFER_BYTES 0xff00 + u32 bufaddr; +#define MXS_PIO_WORDS 16 + u32 pio_words[MXS_PIO_WORDS]; +}; + +#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) + +struct mxs_dma_chan { + struct mxs_dma_engine *mxs_dma; + struct dma_chan chan; + struct dma_async_tx_descriptor desc; + struct tasklet_struct tasklet; + int chan_irq; + struct mxs_dma_ccw *ccw; + dma_addr_t ccw_phys; + dma_cookie_t last_completed; + enum dma_status status; + unsigned int flags; +#define MXS_DMA_SG_LOOP (1 << 0) +}; + +#define MXS_DMA_CHANNELS 16 +#define MXS_DMA_CHANNELS_MASK 0xffff + +struct mxs_dma_engine { + int dev_id; + unsigned int version; + void __iomem *base; + struct clk *clk; + struct dma_device dma_device; + struct device_dma_parameters dma_parms; + struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; +}; + +static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + if (dma_is_apbh() && apbh_is_old()) + writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + else + writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); +} + +static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + /* set cmd_addr up */ + writel(mxs_chan->ccw_phys, + mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); + + /* enable apbh channel clock */ + if (dma_is_apbh()) { + if (apbh_is_old()) + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); + } + + /* write 1 to SEMA to kick off the channel */ + writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); +} + +static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + /* disable apbh channel clock */ + if (dma_is_apbh()) { + if (apbh_is_old()) + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + } + + mxs_chan->status = DMA_SUCCESS; +} + +static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + /* freeze the channel */ + if (dma_is_apbh() && apbh_is_old()) + 
writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); + + mxs_chan->status = DMA_PAUSED; +} + +static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + /* unfreeze the channel */ + if (dma_is_apbh() && apbh_is_old()) + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR); + + mxs_chan->status = DMA_IN_PROGRESS; +} + +static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan) +{ + dma_cookie_t cookie = mxs_chan->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + mxs_chan->chan.cookie = cookie; + mxs_chan->desc.cookie = cookie; + + return cookie; +} + +static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct mxs_dma_chan, chan); +} + +static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan); + + mxs_dma_enable_chan(mxs_chan); + + return mxs_dma_assign_cookie(mxs_chan); +} + +static void mxs_dma_tasklet(unsigned long data) +{ + struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data; + + if (mxs_chan->desc.callback) + mxs_chan->desc.callback(mxs_chan->desc.callback_param); +} + +static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) +{ + struct mxs_dma_engine *mxs_dma = dev_id; + u32 stat1, stat2; + + /* completion status */ + stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); + stat1 &= MXS_DMA_CHANNELS_MASK; + writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR); + + /* error status */ + stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); + writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR); + + /* + * When both the completion and the error of termination bits are set + * at the same time, we do not take it as an error. IOW, it only + * becomes an error we need to handle here if it's either (1) a bus + * error or (2) a termination error with no completion.
+ */ + stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ + (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ + + /* combine error and completion status for checking */ + stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; + while (stat1) { + int channel = fls(stat1) - 1; + struct mxs_dma_chan *mxs_chan = + &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; + + if (channel >= MXS_DMA_CHANNELS) { + dev_dbg(mxs_dma->dma_device.dev, + "%s: error in channel %d\n", __func__, + channel - MXS_DMA_CHANNELS); + mxs_chan->status = DMA_ERROR; + mxs_dma_reset_chan(mxs_chan); + } else { + if (mxs_chan->flags & MXS_DMA_SG_LOOP) + mxs_chan->status = DMA_IN_PROGRESS; + else + mxs_chan->status = DMA_SUCCESS; + } + + stat1 &= ~(1 << channel); + + if (mxs_chan->status == DMA_SUCCESS) + mxs_chan->last_completed = mxs_chan->desc.cookie; + + /* schedule tasklet on this channel */ + tasklet_schedule(&mxs_chan->tasklet); + } + + return IRQ_HANDLED; +} + +static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_data *data = chan->private; + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int ret; + + if (!data) + return -EINVAL; + + mxs_chan->chan_irq = data->chan_irq; + + mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, + &mxs_chan->ccw_phys, GFP_KERNEL); + if (!mxs_chan->ccw) { + ret = -ENOMEM; + goto err_alloc; + } + + memset(mxs_chan->ccw, 0, PAGE_SIZE); + + ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, + 0, "mxs-dma", mxs_dma); + if (ret) + goto err_irq; + + ret = clk_enable(mxs_dma->clk); + if (ret) + goto err_clk; + + mxs_dma_reset_chan(mxs_chan); + + dma_async_tx_descriptor_init(&mxs_chan->desc, chan); + mxs_chan->desc.tx_submit = mxs_dma_tx_submit; + + /* the descriptor is ready */ + async_tx_ack(&mxs_chan->desc); + + return 0; + +err_clk: + free_irq(mxs_chan->chan_irq, mxs_dma); +err_irq: + dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, + mxs_chan->ccw, mxs_chan->ccw_phys); +err_alloc: + return ret; +} + +static void mxs_dma_free_chan_resources(struct dma_chan *chan) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + + mxs_dma_disable_chan(mxs_chan); + + free_irq(mxs_chan->chan_irq, mxs_dma); + + dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, + mxs_chan->ccw, mxs_chan->ccw_phys); + + clk_disable(mxs_dma->clk); +} + +static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long append) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + struct mxs_dma_ccw *ccw; + struct scatterlist *sg; + int i, j; + u32 *pio; + static int idx; + + if (mxs_chan->status == DMA_IN_PROGRESS && !append) + return NULL; + + if (sg_len + (append ? idx : 0) > NUM_CCW) { + dev_err(mxs_dma->dma_device.dev, + "maximum number of sg exceeded: %d > %d\n", + sg_len, NUM_CCW); + goto err_out; + } + + mxs_chan->status = DMA_IN_PROGRESS; + mxs_chan->flags = 0; + + /* + * If the sg is prepared with append flag set, the sg + * will be appended to the last prepared sg. 
+ */ + if (append) { + BUG_ON(idx < 1); + ccw = &mxs_chan->ccw[idx - 1]; + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; + ccw->bits |= CCW_CHAIN; + ccw->bits &= ~CCW_IRQ; + ccw->bits &= ~CCW_DEC_SEM; + ccw->bits &= ~CCW_WAIT4END; + } else { + idx = 0; + } + + if (direction == DMA_NONE) { + ccw = &mxs_chan->ccw[idx++]; + pio = (u32 *) sgl; + + for (j = 0; j < sg_len;) + ccw->pio_words[j++] = *pio++; + + ccw->bits = 0; + ccw->bits |= CCW_IRQ; + ccw->bits |= CCW_DEC_SEM; + ccw->bits |= CCW_WAIT4END; + ccw->bits |= CCW_HALT_ON_TERM; + ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= BF_CCW(sg_len, PIO_NUM); + ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); + } else { + for_each_sg(sgl, sg, sg_len, i) { + if (sg->length > MAX_XFER_BYTES) { + dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", + sg->length, MAX_XFER_BYTES); + goto err_out; + } + + ccw = &mxs_chan->ccw[idx++]; + + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; + ccw->bufaddr = sg->dma_address; + ccw->xfer_bytes = sg->length; + + ccw->bits = 0; + ccw->bits |= CCW_CHAIN; + ccw->bits |= CCW_HALT_ON_TERM; + ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? + MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, + COMMAND); + + if (i + 1 == sg_len) { + ccw->bits &= ~CCW_CHAIN; + ccw->bits |= CCW_IRQ; + ccw->bits |= CCW_DEC_SEM; + ccw->bits |= CCW_WAIT4END; + } + } + } + + return &mxs_chan->desc; + +err_out: + mxs_chan->status = DMA_ERROR; + return NULL; +} + +static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, + size_t period_len, enum dma_data_direction direction) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int num_periods = buf_len / period_len; + int i = 0, buf = 0; + + if (mxs_chan->status == DMA_IN_PROGRESS) + return NULL; + + mxs_chan->status = DMA_IN_PROGRESS; + mxs_chan->flags |= MXS_DMA_SG_LOOP; + + if (num_periods > NUM_CCW) { + dev_err(mxs_dma->dma_device.dev, + "maximum number of sg exceeded: %d > %d\n", + num_periods, NUM_CCW); + goto err_out; + } + + if (period_len > MAX_XFER_BYTES) { + dev_err(mxs_dma->dma_device.dev, + "maximum period size exceeded: %d > %d\n", + period_len, MAX_XFER_BYTES); + goto err_out; + } + + while (buf < buf_len) { + struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i]; + + if (i + 1 == num_periods) + ccw->next = mxs_chan->ccw_phys; + else + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1); + + ccw->bufaddr = dma_addr; + ccw->xfer_bytes = period_len; + + ccw->bits = 0; + ccw->bits |= CCW_CHAIN; + ccw->bits |= CCW_IRQ; + ccw->bits |= CCW_HALT_ON_TERM; + ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? 
+ MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); + + dma_addr += period_len; + buf += period_len; + + i++; + } + + return &mxs_chan->desc; + +err_out: + mxs_chan->status = DMA_ERROR; + return NULL; +} + +static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + int ret = 0; + + switch (cmd) { + case DMA_TERMINATE_ALL: + mxs_dma_disable_chan(mxs_chan); + break; + case DMA_PAUSE: + mxs_dma_pause_chan(mxs_chan); + break; + case DMA_RESUME: + mxs_dma_resume_chan(mxs_chan); + break; + default: + ret = -ENOSYS; + } + + return ret; +} + +static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + dma_cookie_t last_used; + + last_used = chan->cookie; + dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); + + return mxs_chan->status; +} + +static void mxs_dma_issue_pending(struct dma_chan *chan) +{ + /* + * Nothing to do. We only have a single descriptor. + */ +} + +static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) +{ + int ret; + + ret = clk_enable(mxs_dma->clk); + if (ret) + goto err_out; + + ret = mxs_reset_block(mxs_dma->base); + if (ret) + goto err_out; + + /* only major version matters */ + mxs_dma->version = readl(mxs_dma->base + + ((mxs_dma->dev_id == MXS_DMA_APBX) ? + HW_APBX_VERSION : HW_APBH_VERSION)) >> + BP_APBHX_VERSION_MAJOR; + + /* enable apbh burst */ + if (dma_is_apbh()) { + writel(BM_APBH_CTRL0_APB_BURST_EN, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + writel(BM_APBH_CTRL0_APB_BURST8_EN, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + } + + /* enable irq for all the channels */ + writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, + mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); + + clk_disable(mxs_dma->clk); + + return 0; + +err_out: + return ret; +} + +static int __init mxs_dma_probe(struct platform_device *pdev) +{ + const struct platform_device_id *id_entry = + platform_get_device_id(pdev); + struct mxs_dma_engine *mxs_dma; + struct resource *iores; + int ret, i; + + mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL); + if (!mxs_dma) + return -ENOMEM; + + mxs_dma->dev_id = id_entry->driver_data; + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!request_mem_region(iores->start, resource_size(iores), + pdev->name)) { + ret = -EBUSY; + goto err_request_region; + } + + mxs_dma->base = ioremap(iores->start, resource_size(iores)); + if (!mxs_dma->base) { + ret = -ENOMEM; + goto err_ioremap; + } + + mxs_dma->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(mxs_dma->clk)) { + ret = PTR_ERR(mxs_dma->clk); + goto err_clk; + } + + dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask); + dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask); + + INIT_LIST_HEAD(&mxs_dma->dma_device.channels); + + /* Initialize channel parameters */ + for (i = 0; i < MXS_DMA_CHANNELS; i++) { + struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; + + mxs_chan->mxs_dma = mxs_dma; + mxs_chan->chan.device = &mxs_dma->dma_device; + + tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, + (unsigned long) mxs_chan); + + + /* Add the channel to mxs_chan list */ + list_add_tail(&mxs_chan->chan.device_node, + &mxs_dma->dma_device.channels); + } + + ret = mxs_dma_init(mxs_dma); + if (ret) + goto err_init; + + mxs_dma->dma_device.dev = &pdev->dev; + + /* mxs_dma gets 65535 bytes maximum sg size */ + mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms; 
+ dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES); + + mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources; + mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources; + mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; + mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; + mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; + mxs_dma->dma_device.device_control = mxs_dma_control; + mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; + + ret = dma_async_device_register(&mxs_dma->dma_device); + if (ret) { + dev_err(mxs_dma->dma_device.dev, "unable to register\n"); + goto err_init; + } + + dev_info(mxs_dma->dma_device.dev, "initialized\n"); + + return 0; + +err_init: + clk_put(mxs_dma->clk); +err_clk: + iounmap(mxs_dma->base); +err_ioremap: + release_mem_region(iores->start, resource_size(iores)); +err_request_region: + kfree(mxs_dma); + return ret; +} + +static struct platform_device_id mxs_dma_type[] = { + { + .name = "mxs-dma-apbh", + .driver_data = MXS_DMA_APBH, + }, { + .name = "mxs-dma-apbx", + .driver_data = MXS_DMA_APBX, + } +}; + +static struct platform_driver mxs_dma_driver = { + .driver = { + .name = "mxs-dma", + }, + .id_table = mxs_dma_type, +}; + +static int __init mxs_dma_module_init(void) +{ + return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); +} +subsys_initcall(mxs_dma_module_init); -- cgit v1.2.3

From f44ad7e91dd12bed0959b3e715f4f3ab84951a59 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:14 +0530 Subject: dw_dmac: Remove compilation dependency on AVR32 and depend on HAVE_CLK instead

This driver will now be used on at least two platforms, AVR32 & ARM, and there is no actual hardware dependency of this driver on AVR32 or ARM, so this dependency can be removed altogether. Also, the dw_dmac driver uses the clk framework and must therefore have a compilation dependency on HAVE_CLK.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers')

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 76f6472ba31..d700895cb40 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -82,7 +82,7 @@ config INTEL_IOP_ADMA config DW_DMAC tristate "Synopsys DesignWare AHB DMA support" - depends on AVR32 + depends on HAVE_CLK select DMA_ENGINE default y if CPU_AT32AP7000 help -- cgit v1.2.3

From cb689a706d17ef19a61735670ded60466dd015fa Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:15 +0530 Subject: dw_dmac: Replace module_init() with subsys_initcall()

In some cases users of dw_dmac are initialized before dw_dmac, and if they try to use dw_dmac, they simply fail. So it's better to register the driver's init() routine using subsys_initcall() instead of module_init(), so that the dma driver is available as early as possible.
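To illustrate the ordering argument, a minimal hedged sketch of the pattern (all driver and function names below are hypothetical, not from the patch). For built-in code, subsys_initcall() registers at initcall level 4, while module_init() maps to device_initcall() (level 6), so a DMA provider registered this way probes before ordinary drivers that may request its channels:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Hypothetical DMA provider, for illustration only. */
static int __init example_dma_probe(struct platform_device *pdev)
{
	return 0;	/* controller setup would go here */
}

static struct platform_driver example_dma_driver = {
	.driver = {
		.name = "example-dma",
	},
};

static int __init example_dma_init(void)
{
	return platform_driver_probe(&example_dma_driver, example_dma_probe);
}
subsys_initcall(example_dma_init);	/* instead of module_init(example_dma_init) */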
Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 08dab3badad..064a1830a76 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1455,7 +1455,7 @@ static int __init dw_init(void) { return platform_driver_probe(&dw_driver, dw_probe); } -module_init(dw_init); +subsys_initcall(dw_init); static void __exit dw_exit(void) { -- cgit v1.2.3

From f336e42f73d93b74fd21bf9176ee6c7ab8b195c5 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:16 +0530 Subject: dw_dmac: Move single descriptor from dwc->queue to dwc->active_list in dwc_complete_all

dwc_complete_all and other routines were removing all descriptors from dwc->queue and pushing them to dwc->active_list, when only one needed to be moved. Also, dwc_dostart() is now called once the list is fixed.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 064a1830a76..942b50f57f2 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -87,11 +87,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) return list_entry(dwc->active_list.next, struct dw_desc, desc_node); } -static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) -{ - return list_entry(dwc->queue.next, struct dw_desc, desc_node); -} - static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) { struct dw_desc *desc, *_desc; @@ -262,10 +257,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) * Submit queued descriptors ASAP, i.e. before we go through * the completed ones.
*/ - if (!list_empty(&dwc->queue)) - dwc_dostart(dwc, dwc_first_queued(dwc)); list_splice_init(&dwc->active_list, &list); - list_splice_init(&dwc->queue, &dwc->active_list); + if (!list_empty(&dwc->queue)) { + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_descriptor_complete(dwc, desc); @@ -325,8 +321,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) cpu_relax(); if (!list_empty(&dwc->queue)) { - dwc_dostart(dwc, dwc_first_queued(dwc)); - list_splice_init(&dwc->queue, &dwc->active_list); + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); } } @@ -352,7 +348,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) */ bad_desc = dwc_first_active(dwc); list_del_init(&bad_desc->desc_node); - list_splice_init(&dwc->queue, dwc->active_list.prev); + list_move(dwc->queue.next, dwc->active_list.prev); /* Clear the error flag and try to restart the controller */ dma_writel(dw, CLEAR.ERROR, dwc->mask); @@ -547,8 +543,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) if (list_empty(&dwc->active_list)) { dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", desc->txd.cookie); - dwc_dostart(dwc, desc); list_add_tail(&desc->desc_node, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); } else { dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", desc->txd.cookie); -- cgit v1.2.3

From 569432efa7975f5795efb8142134f5a098942381 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:17 +0530 Subject: dw_dmac: Calling dwc_scan_descriptors from dwc_tx_status() after taking lock

The lock must be taken before calling dwc_scan_descriptors, as it may access/modify shared data and queues. dwc_tx_status wasn't taking the lock before calling this routine. This patch adds code that takes the lock before calling dwc_scan_descriptors.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 942b50f57f2..2b0d5e99ae7 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -836,7 +836,9 @@ dwc_tx_status(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret != DMA_SUCCESS) { + spin_lock_bh(&dwc->lock); dwc_scan_descriptors(to_dw_dma(chan->device), dwc); + spin_unlock_bh(&dwc->lock); last_complete = dwc->completed; last_used = chan->cookie; -- cgit v1.2.3

From a02274564dd78f7edde3c9ff197ed44f2f8a5a81 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:18 +0530 Subject: dw_dmac: Adding support for 64 bit access width for memcpy xfers

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 2b0d5e99ae7..e5d97bf9264 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -583,7 +583,9 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, * We can be a lot more clever here, but this should take care * of the most common optimization.
*/ - if (!((src | dest | len) & 3)) + if (!((src | dest | len) & 7)) + src_width = dst_width = 3; + else if (!((src | dest | len) & 3)) src_width = dst_width = 2; else if (!((src | dest | len) & 1)) src_width = dst_width = 1; -- cgit v1.2.3

From 418e74070662e1ae7d9bb5202f773d35c9a7f05e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 4 Mar 2011 15:42:50 +0530 Subject: dw_dmac: Change value of DWC_MAX_COUNT to 4095.

Every descriptor can transfer a maximum count of 4095 (12 bits, in the control reg), so we must set DWC_MAX_COUNT to 4095 instead of 2048.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index e5d97bf9264..711ebe954f5 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -47,14 +47,13 @@ /* * This is configuration-dependent and usually a funny size like 4095. - * Let's round it down to the nearest power of two. * * Note that this is a transfer count, i.e. if we transfer 32-bit - * words, we can do 8192 bytes per descriptor. + * words, we can do 16380 bytes per descriptor. * * This parameter is also system-specific. */ -#define DWC_MAX_COUNT 2048U +#define DWC_MAX_COUNT 4095U /* * Number of descriptors to allocate for each channel. This should be -- cgit v1.2.3

From e518076ef8cb56adb558ff56ad5bfa0cd9f3abd9 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:20 +0530 Subject: dw_dmac: Mark all tx_descriptors with DMA_CTRL_ACK after xfer finishes

dwc_desc_get checks all descriptors for DMA_CTRL_ACK before allocating them for transfers, but descriptors are not marked with DMA_CTRL_ACK after the transfer finishes. Thus a descriptor, once used, is not usable again. This patch marks descriptors with DMA_CTRL_ACK after the dma xfer finishes.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 711ebe954f5..6ab440e532b 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -198,6 +198,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) dma_async_tx_callback callback; void *param; struct dma_async_tx_descriptor *txd = &desc->txd; + struct dw_desc *child; dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); @@ -206,6 +207,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) param = txd->callback_param; dwc_sync_desc_for_cpu(dwc, desc); + + /* async_tx_ack */ + list_for_each_entry(child, &desc->tx_list, desc_node) + async_tx_ack(&child->txd); + async_tx_ack(&desc->txd); + list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); -- cgit v1.2.3

From b0c3130d69bda5cd91aa3b3f08e7878df49fde69 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:21 +0530 Subject: dw_dmac: Pass Channel Allocation Order from platform_data

On the SPEAr platform, channels 4-7 have a larger FIFO depth, so we should hand out the better channels first. This patch introduces the concept of a channel allocation order in dw_dmac. If the user doesn't pass anything, or passes 0, then normal (ascending) channel allocation is used; otherwise channels are allocated in descending order. A board-level sketch follows.
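A hedged sketch of how a board file might request descending allocation; the platform-data field follows the patch, while CHAN_ALLOCATION_DESCENDING and the exact struct layout are assumptions based on the companion include/linux/dw_dmac.h change, which is not part of the diff shown here:

#include <linux/dw_dmac.h>

/*
 * Hypothetical board-file snippet. CHAN_ALLOCATION_DESCENDING is
 * assumed to be defined next to CHAN_ALLOCATION_ASCENDING in the
 * header change accompanying this patch (not shown in this series).
 */
static struct dw_dma_platform_data example_dw_dma_pdata = {
	.nr_channels		= 8,
	/* channels 4-7 have the deeper FIFOs, so hand them out first */
	.chan_allocation_order	= CHAN_ALLOCATION_DESCENDING,
};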
Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 6ab440e532b..f413e123405 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1319,7 +1319,11 @@ static int __init dw_probe(struct platform_device *pdev) dwc->chan.device = &dw->dma; dwc->chan.cookie = dwc->completed = 1; dwc->chan.chan_id = i; - list_add_tail(&dwc->chan.device_node, &dw->dma.channels); + if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) + list_add_tail(&dwc->chan.device_node, + &dw->dma.channels); + else + list_add(&dwc->chan.device_node, &dw->dma.channels); dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; spin_lock_init(&dwc->lock); -- cgit v1.2.3

From 93317e8e35b77633d589fe0e132291195757d785 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:22 +0530 Subject: dw_dmac: Pass Channel Priority from platform_data

In the Synopsys DesignWare controller, channel priority is programmable. This patch adds support for passing channel priority through platform data. By default, ascending channel priority is followed, i.e. channel 0 gets the highest priority and channel 7 the lowest.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 11 ++++++++++- drivers/dma/dw_dmac_regs.h | 3 +++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index f413e123405..318a342fc7e 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -901,8 +901,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); cfghi = dws->cfg_hi; - cfglo = dws->cfg_lo; + cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; } + + cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority); + channel_writel(dwc, CFG_LO, cfglo); channel_writel(dwc, CFG_HI, cfghi); @@ -1325,6 +1328,12 @@ static int __init dw_probe(struct platform_device *pdev) else list_add(&dwc->chan.device_node, &dw->dma.channels); + /* 7 is highest priority & 0 is lowest. */ + if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) + dwc->priority = 7 - i; + else + dwc->priority = i; + dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; spin_lock_init(&dwc->lock); dwc->mask = 1 << i; diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index d9a939f67f4..6a8e6d35f35 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -101,6 +101,8 @@ struct dw_dma_regs { #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff /* Bitfields in CFG_LO. Platform-configurable bits are in */ +#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ +#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ @@ -134,6 +136,7 @@ struct dw_dma_chan { struct dma_chan chan; void __iomem *ch_regs; u8 mask; + u8 priority; spinlock_t lock; -- cgit v1.2.3

From ee66509d7f354eecb45ac99f21ea6aa8650dea7e Mon Sep 17 00:00:00 2001 From: Viresh KUMAR Date: Fri, 4 Mar 2011 15:42:51 +0530 Subject: dw_dmac: Allow src/dst msize & flow controller to be configured at runtime

Msize (burst size) is peripheral-dependent for prep_slave_sg and cyclic_prep transfers, and platform-dependent for memcpy transfers. So the msize configuration must come from platform data, as sketched below.
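As an illustration, a hedged sketch of the per-slave configuration the driver consumes after this patch (dws->src_msize, dws->dst_msize, dws->fc). DW_DMA_MSIZE_16 appears in the next patch of this series; DW_DMA_FC_D_M2P is an assumption about the companion header change, not shown in this diff:

#include <linux/dw_dmac.h>

/*
 * Hypothetical slave configuration. The fc value is assumed to name
 * "DMAC as flow controller, memory-to-peripheral", matching the
 * DWC_CTLL_FC_M2P encoding (1 << 20) in dw_dmac_regs.h.
 */
static struct dw_dma_slave example_tx_slave = {
	.src_msize	= DW_DMA_MSIZE_16,	/* burst of 16 on the source side */
	.dst_msize	= DW_DMA_MSIZE_16,	/* burst of 16 on the destination side */
	.fc		= DW_DMA_FC_D_M2P,	/* assumed flow-controller constant */
};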
Also, some peripherals (e.g. JPEG) need to be the flow controller for dma transfers, so for slave_sg & cyclic_prep transfers this information must also come from platform data.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 14 ++++++++------ drivers/dma/dw_dmac_regs.h | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 318a342fc7e..90ea08a53d6 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -36,9 +36,11 @@ struct dw_dma_slave *__slave = (private); \ int dms = __slave ? __slave->dst_master : 0; \ int sms = __slave ? __slave->src_master : 1; \ + u8 smsize = __slave ? __slave->src_msize : 0; \ + u8 dmsize = __slave ? __slave->dst_msize : 0; \ \ - (DWC_CTLL_DST_MSIZE(0) \ - | DWC_CTLL_SRC_MSIZE(0) \ + (DWC_CTLL_DST_MSIZE(dmsize) \ + | DWC_CTLL_SRC_MSIZE(smsize) \ | DWC_CTLL_LLP_D_EN \ | DWC_CTLL_LLP_S_EN \ | DWC_CTLL_DMS(dms) \ @@ -683,7 +685,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC - | DWC_CTLL_FC_M2P); + | DWC_CTLL_FC(dws->fc)); reg = dws->tx_reg; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; @@ -728,7 +730,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX - | DWC_CTLL_FC_P2M); + | DWC_CTLL_FC(dws->fc)); reg = dws->rx_reg; for_each_sg(sgl, sg, sg_len, i) { @@ -1146,7 +1148,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC - | DWC_CTLL_FC_M2P + | DWC_CTLL_FC(dws->fc) | DWC_CTLL_INT_EN); break; case DMA_FROM_DEVICE: @@ -1157,7 +1159,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX - | DWC_CTLL_FC_P2M + | DWC_CTLL_FC(dws->fc) | DWC_CTLL_INT_EN); break; default: diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 6a8e6d35f35..9a32964bf79 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -86,6 +86,7 @@ struct dw_dma_regs { #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ +#define DWC_CTLL_FC(n) ((n) << 20) #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ -- cgit v1.2.3

From e51dc53b8c7fa2d9ac4ef8f317f5dfe07a79e65a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:25 +0530 Subject: dw_dmac: Setting Default Burst length for transfers as 16.

This patch sets the default burst length for all transfers to 16. This will enhance performance when the user doesn't supply any chan->private data.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 90ea08a53d6..9c25c7d099e 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -36,8 +36,8 @@ struct dw_dma_slave *__slave = (private); \ int dms = __slave ? __slave->dst_master : 0; \ int sms = __slave ? __slave->src_master : 1; \ - u8 smsize = __slave ? __slave->src_msize : 0; \ - u8 dmsize = __slave ? __slave->dst_msize : 0; \ + u8 smsize = __slave ?
__slave->src_msize : DW_DMA_MSIZE_16; \ + u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \ \ (DWC_CTLL_DST_MSIZE(dmsize) \ | DWC_CTLL_SRC_MSIZE(smsize) \ -- cgit v1.2.3

From 29782da5f0206335e2325508ba4fee0d624ddab6 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 4 Mar 2011 14:58:32 +0530 Subject: dmaengine/dw_dmac fix: use readl & writel instead of __raw_readl & __raw_writel

On ARMv7 cores, device memory mapped as Normal Non-cacheable may not guarantee ordered access, causing failures in device drivers that do not use the mandatory memory barriers. The readl & writel versions contain the necessary memory barriers. commit 79f64dbf68c8a9779a7e9a25e0a9f0217a25b57a: "ARM: 6273/1: Add barriers to the I/O accessors if ARM_DMA_MEM_BUFFERABLE" can be referred to for more information on this.

Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac_regs.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers')

diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 9a32964bf79..720f821527f 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -159,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc) } #define channel_readl(dwc, name) \ - __raw_readl(&(__dwc_regs(dwc)->name)) + readl(&(__dwc_regs(dwc)->name)) #define channel_writel(dwc, name, val) \ - __raw_writel((val), &(__dwc_regs(dwc)->name)) + writel((val), &(__dwc_regs(dwc)->name)) static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) { @@ -185,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) } #define dma_readl(dw, name) \ - __raw_readl(&(__dw_regs(dw)->name)) + readl(&(__dw_regs(dw)->name)) #define dma_writel(dw, name, val) \ - __raw_writel((val), &(__dw_regs(dw)->name)) + writel((val), &(__dw_regs(dw)->name)) #define channel_set_bit(dw, reg, mask) \ dma_writel(dw, reg, ((mask) << 8) | (mask)) -- cgit v1.2.3

From 0b863b333f529c7ddd8bee58e6696a7254417a05 Mon Sep 17 00:00:00 2001 From: Rakib Mullick Date: Sun, 6 Mar 2011 17:26:10 +0600 Subject: drivers, pch_dma: Fix warning when CONFIG_PM=n.

When CONFIG_PM=n, we get the following warning: drivers/dma/pch_dma.c:741: warning: ‘pch_dma_suspend’ defined but not used drivers/dma/pch_dma.c:755: warning: ‘pch_dma_resume’ defined but not used To fix it, wrap the pch_dma_{suspend,resume} and pch_dma_{save,restore}_regs functions with CONFIG_PM.

Signed-off-by: Rakib Mullick Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers')

diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index b1dfba7e37b..8d8fef1480a 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -693,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid) return ret; } +#ifdef CONFIG_PM static void pch_dma_save_regs(struct pch_dma *pd) { struct pch_dma_chan *pd_chan; @@ -770,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev) return 0; } +#endif static int __devinit pch_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) -- cgit v1.2.3
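To close, a minimal hedged sketch of the pattern that last fix uses, with generic names rather than the pch_dma symbols: the PM callbacks and the .suspend/.resume hooks are compiled in together, so when CONFIG_PM=n nothing is left "defined but not used":

#include <linux/pci.h>

#ifdef CONFIG_PM
/* Hypothetical legacy PCI PM callbacks, for illustration only. */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* save config space, then let the PCI core power the device down */
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return 0;
}
#endif

static struct pci_driver example_driver = {
	.name		= "example",
#ifdef CONFIG_PM
	.suspend	= example_suspend,
	.resume		= example_resume,
#endif
};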