From 087809fce28f50098d9c3ef1a6865c722f23afd2 Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Fri, 21 Jan 2011 14:11:52 +0000 Subject: dmaengine/dw_dmac: don't scan descriptors if no xfers in progress Some hardware (picoChip picoXCell in particular) sometimes has the block transfer complete bit being set for a channel after the whole transfer has completed. If we don't have any transfers in the active list then don't bother to scan the descriptors. This often happens in normal operation and doesn't require the channel to be reset. v2: cleanup whitespace Signed-off-by: Jamie Iles Signed-off-by: Dan Williams --- drivers/dma/dw_dmac.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index a3991ab0d67..db22754be35 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -291,6 +291,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) return; } + if (list_empty(&dwc->active_list)) + return; + dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { -- cgit v1.2.3 From f301c062dcdd113bc977ae1ebc8c12232f8531a9 Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Fri, 21 Jan 2011 14:11:53 +0000 Subject: dmaengine/dw_dmac: allow src/dst masters to be configured at runtime Some platforms have flexible mastering capabilities and this needs to be selected at runtime. If the platform has specified private data in the form of the dw_dma_slave then fetch the source and destination masters from here. If this isn't present, default to the previous of 0 and 1. v2: cleanup whitespace Acked-by: Hans-Christian Egtvedt Signed-off-by: Jamie Iles Signed-off-by: Dan Williams --- drivers/dma/dw_dmac.c | 31 +++++++++++++++++-------------- include/linux/dw_dmac.h | 2 ++ 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index db22754be35..a4cf2614085 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -32,15 +32,18 @@ * which does not support descriptor writeback. */ -/* NOTE: DMS+SMS is system-specific. We should get this information - * from the platform code somehow. - */ -#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ - | DWC_CTLL_SRC_MSIZE(0) \ - | DWC_CTLL_DMS(0) \ - | DWC_CTLL_SMS(1) \ - | DWC_CTLL_LLP_D_EN \ - | DWC_CTLL_LLP_S_EN) +#define DWC_DEFAULT_CTLLO(private) ({ \ + struct dw_dma_slave *__slave = (private); \ + int dms = __slave ? __slave->dst_master : 0; \ + int sms = __slave ? __slave->src_master : 1; \ + \ + (DWC_CTLL_DST_MSIZE(0) \ + | DWC_CTLL_SRC_MSIZE(0) \ + | DWC_CTLL_LLP_D_EN \ + | DWC_CTLL_LLP_S_EN \ + | DWC_CTLL_DMS(dms) \ + | DWC_CTLL_SMS(sms)); \ + }) /* * This is configuration-dependent and usually a funny size like 4095. 
@@ -591,7 +594,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, else src_width = dst_width = 0; - ctllo = DWC_DEFAULT_CTLLO + ctllo = DWC_DEFAULT_CTLLO(chan->private) | DWC_CTLL_DST_WIDTH(dst_width) | DWC_CTLL_SRC_WIDTH(src_width) | DWC_CTLL_DST_INC @@ -672,7 +675,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, switch (direction) { case DMA_TO_DEVICE: - ctllo = (DWC_DEFAULT_CTLLO + ctllo = (DWC_DEFAULT_CTLLO(chan->private) | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC @@ -717,7 +720,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } break; case DMA_FROM_DEVICE: - ctllo = (DWC_DEFAULT_CTLLO + ctllo = (DWC_DEFAULT_CTLLO(chan->private) | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX @@ -1129,7 +1132,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, case DMA_TO_DEVICE: desc->lli.dar = dws->tx_reg; desc->lli.sar = buf_addr + (period_len * i); - desc->lli.ctllo = (DWC_DEFAULT_CTLLO + desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_FIX @@ -1140,7 +1143,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, case DMA_FROM_DEVICE: desc->lli.dar = buf_addr + (period_len * i); desc->lli.sar = dws->rx_reg; - desc->lli.ctllo = (DWC_DEFAULT_CTLLO + desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_INC diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index c8aad713a04..8014eb81054 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -52,6 +52,8 @@ struct dw_dma_slave { enum dw_dma_slave_width reg_width; u32 cfg_hi; u32 cfg_lo; + int src_master; + int dst_master; }; /* Platform-configurable bits in CFG_HI */ -- cgit v1.2.3 From 95ea759e9e116dade3e7386be2a3db76c90f4675 Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Fri, 21 Jan 2011 14:11:54 +0000 Subject: dmaengine/dw_dmac: provide a mechanism to indicate private devices Some platforms (e.g. Picochip PC3XX) have multiple DMA controllers where some may be used for slave transfers and others for general purpose memcpy type transfers. Add a .is_private boolean to the platform data structure so that controllers can be marked as private so that the DMA_PRIVATE capability will be set for that controller. 
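For illustration, a board port only has to set the new flag in its platform data. A minimal sketch, assuming a hypothetical picoXcell-style platform (the instance name and channel count are made up; the two fields are the ones added in the patch below):

	static struct dw_dma_platform_data pc3xx_dmac0_pdata = {
		.nr_channels	= 8,	/* hardware supports at most 8 */
		.is_private	= true,	/* keep channels out of the general-purpose pool */
	};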
Signed-off-by: Jamie Iles Signed-off-by: Dan Williams --- drivers/dma/dw_dmac.c | 2 ++ include/linux/dw_dmac.h | 3 +++ 2 files changed, 5 insertions(+) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index a4cf2614085..08dab3badad 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1341,6 +1341,8 @@ static int __init dw_probe(struct platform_device *pdev) dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); + if (pdata->is_private) + dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); dw->dma.dev = &pdev->dev; dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; dw->dma.device_free_chan_resources = dwc_free_chan_resources; diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 8014eb81054..deec66b3718 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -16,9 +16,12 @@ /** * struct dw_dma_platform_data - Controller configuration parameters * @nr_channels: Number of channels supported by hardware (max 8) + * @is_private: The device channels should be marked as private and not for * use by the general purpose DMA channel allocator. */ struct dw_dma_platform_data { unsigned int nr_channels; + bool is_private; }; /** -- cgit v1.2.3 From 4aa5f366431fef0afca0df348ca9782c63ac9911 Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Fri, 21 Jan 2011 14:11:55 +0000 Subject: avr32: at32ap700x: specify DMA src and dst masters Now that the dw_dmac DMA driver supports configurable source and destination masters, we need to specify which ones to use. This was previously hardcoded to 0 and 1 respectively in the driver. Acked-by: Hans-Christian Egtvedt Signed-off-by: Jamie Iles Signed-off-by: Dan Williams --- arch/avr32/mach-at32ap/at32ap700x.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index e67c9994542..2747cde8c9a 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -2048,6 +2048,8 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3); rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); + rx_dws->src_master = 0; + rx_dws->dst_master = 1; } /* Check if DMA slave interface for playback should be configured.
*/ @@ -2056,6 +2058,8 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; tx_dws->cfg_hi = DWC_CFGH_DST_PER(4); tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); + tx_dws->src_master = 0; + tx_dws->dst_master = 1; } if (platform_device_add_data(pdev, data, @@ -2128,6 +2132,8 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data) dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; dws->cfg_hi = DWC_CFGH_DST_PER(2); dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); + dws->src_master = 0; + dws->dst_master = 1; if (platform_device_add_data(pdev, data, sizeof(struct atmel_abdac_pdata))) -- cgit v1.2.3 From cb9ab2d8e4661c811d5e9a8e687b6f736690c90e Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:04 +0100 Subject: dma40: make init function static Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6e1d46a65d0..52013ed4b44 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3060,7 +3060,7 @@ static struct platform_driver d40_driver = { }, }; -int __init stedma40_init(void) +static int __init stedma40_init(void) { return platform_driver_probe(&d40_driver, d40_probe); } -- cgit v1.2.3 From 262d2915d4f11e5e78e432ab68f0ee034ef3f75f Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:05 +0100 Subject: dma40: ensure event lines get enabled The controller sometimes fails to register the enable of the event line when both src and dst event lines are used on the same logical channel. Implement the recommended software workaround, which is to retry the write until it works. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 63 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 52013ed4b44..42c88fc2881 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -303,6 +303,11 @@ struct d40_reg_val { unsigned int val; }; +static struct device *chan2dev(struct d40_chan *d40c) +{ + return &d40c->chan.dev->device; +} + static int d40_pool_lli_alloc(struct d40_desc *d40d, int lli_len, bool is_log) { @@ -701,17 +706,46 @@ static void d40_term_all(struct d40_chan *d40c) d40c->busy = false; } +static void __d40_config_set_event(struct d40_chan *d40c, bool enable, + u32 event, int reg) +{ + void __iomem *addr = d40c->base->virtbase + D40_DREG_PCBASE + + d40c->phy_chan->num * D40_DREG_PCDELTA + reg; + int tries; + + if (!enable) { + writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) + | ~D40_EVENTLINE_MASK(event), addr); + return; + } + + /* + * The hardware sometimes doesn't register the enable when src and dst + * event lines are active on the same logical channel. Retry to ensure + * it does. Usually only one retry is sufficient. + */ + tries = 100; + while (--tries) { + writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) + | ~D40_EVENTLINE_MASK(event), addr); + + if (readl(addr) & D40_EVENTLINE_MASK(event)) + break; + } + + if (tries != 99) + dev_dbg(chan2dev(d40c), + "[%s] workaround enable S%cLNK (%d tries)\n", + __func__, reg == D40_CHAN_REG_SSLNK ?
'S' : 'D', + 100 - tries); + + WARN_ON(!tries); +} + static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) { - u32 val; unsigned long flags; - /* Notice, that disable requires the physical channel to be stopped */ - if (do_enable) - val = D40_ACTIVATE_EVENTLINE; - else - val = D40_DEACTIVATE_EVENTLINE; - spin_lock_irqsave(&d40c->phy_chan->lock, flags); /* Enable event line connected to device (or memcpy) */ @@ -719,20 +753,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); - writel((val << D40_EVENTLINE_POS(event)) | - ~D40_EVENTLINE_MASK(event), - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); + __d40_config_set_event(d40c, do_enable, event, + D40_CHAN_REG_SSLNK); } + if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); - writel((val << D40_EVENTLINE_POS(event)) | - ~D40_EVENTLINE_MASK(event), - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK); + __d40_config_set_event(d40c, do_enable, event, + D40_CHAN_REG_SDLNK); } spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); -- cgit v1.2.3 From 7d83a854a1a44a8f6a699503441403a36c42f66c Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:06 +0100 Subject: dma40: remove "hardware link with previous jobs" code This link in hardware with previous jobs code is: - unused, no clients using or requiring this feature - incomplete, being implemented only for physical channels - broken, only working to perform one link Remove it. This also allows us to get rid of the channel pause in the submit_tx() routine. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 113 +++--------------------------------------------- 1 file changed, 6 insertions(+), 107 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 42c88fc2881..ed2a3ebcd86 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -94,7 +94,6 @@ struct d40_lli_pool { * during a transfer. * @node: List entry. * @is_in_client_list: true if the client owns this descriptor. - * @is_hw_linked: true if this job will automatically be continued for * the previous one. * * This descriptor is used for both logical and physical transfers. 
@@ -114,7 +113,6 @@ struct d40_desc { struct list_head node; bool is_in_client_list; - bool is_hw_linked; }; /** @@ -548,18 +546,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) return d; } -static struct d40_desc *d40_last_queued(struct d40_chan *d40c) -{ - struct d40_desc *d; - - if (list_empty(&d40c->queue)) - return NULL; - list_for_each_entry(d, &d40c->queue, node) - if (list_is_last(&d->node, &d40c->queue)) - break; - return d; -} - static int d40_psize_2_burst_size(bool is_log, int psize) { if (is_log) { @@ -940,77 +926,6 @@ no_suspend: return res; } -static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) -{ - /* TODO: Write */ -} - -static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d) -{ - struct d40_desc *d40d_prev = NULL; - int i; - u32 val; - - if (!list_empty(&d40c->queue)) - d40d_prev = d40_last_queued(d40c); - else if (!list_empty(&d40c->active)) - d40d_prev = d40_first_active_get(d40c); - - if (!d40d_prev) - return; - - /* Here we try to join this job with previous jobs */ - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); - - /* Figure out which link we're currently transmitting */ - for (i = 0; i < d40d_prev->lli_len; i++) - if (val == d40d_prev->lli_phy.src[i].reg_lnk) - break; - - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS; - - if (i == (d40d_prev->lli_len - 1) && val > 0) { - /* Change the current one */ - writel(virt_to_phys(d40d->lli_phy.src), - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); - writel(virt_to_phys(d40d->lli_phy.dst), - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK); - - d40d->is_hw_linked = true; - - } else if (i < d40d_prev->lli_len) { - (void) dma_unmap_single(d40c->base->dev, - virt_to_phys(d40d_prev->lli_phy.src), - d40d_prev->lli_pool.size, - DMA_TO_DEVICE); - - /* Keep the settings */ - val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & - ~D40_SREG_LNK_PHYS_LNK_MASK; - d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk = - val | virt_to_phys(d40d->lli_phy.src); - - val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & - ~D40_SREG_LNK_PHYS_LNK_MASK; - d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = - val | virt_to_phys(d40d->lli_phy.dst); - - (void) dma_map_single(d40c->base->dev, - d40d_prev->lli_phy.src, - d40d_prev->lli_pool.size, - DMA_TO_DEVICE); - d40d->is_hw_linked = true; - } -} - static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) { struct d40_chan *d40c = container_of(tx->chan, @@ -1019,8 +934,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); unsigned long flags; - (void) d40_pause(&d40c->chan); - spin_lock_irqsave(&d40c->lock, flags); d40c->chan.cookie++; @@ -1030,17 +943,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) d40d->txd.cookie = d40c->chan.cookie; - if (d40c->log_num == D40_PHY_CHAN) - d40_tx_submit_phy(d40c, d40d); - else - d40_tx_submit_log(d40c, d40d); - d40_desc_queue(d40c, d40d); spin_unlock_irqrestore(&d40c->lock, flags); - (void) d40_resume(&d40c->chan); - return tx->cookie; } @@ -1080,21 +986,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) /* Add to active queue */ 
d40_desc_submit(d40c, d40d); - /* - * If this job is already linked in hw, - * do not submit it. - */ - - if (!d40d->is_hw_linked) { - /* Initiate DMA job */ - d40_desc_load(d40c, d40d); + /* Initiate DMA job */ + d40_desc_load(d40c, d40d); - /* Start dma job */ - err = d40_start(d40c); + /* Start dma job */ + err = d40_start(d40c); - if (err) - return NULL; - } + if (err) + return NULL; } return d40d; -- cgit v1.2.3 From 8ca84687b91322b9eafeaf4da43a21684cd0316e Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:07 +0100 Subject: dma40: use helper for channel registers base The register offset computation for accessing channel registers is copy/pasted in several places. Create a helper function to do it. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 74 ++++++++++++++++++++----------------------------- 1 file changed, 30 insertions(+), 44 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index ed2a3ebcd86..3d4cea3cff3 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -306,6 +306,12 @@ static struct device *chan2dev(struct d40_chan *d40c) return &d40c->chan.dev->device; } +static void __iomem *chan_base(struct d40_chan *chan) +{ + return chan->base->virtbase + D40_DREG_PCBASE + + chan->phy_chan->num * D40_DREG_PCDELTA; +} + static int d40_pool_lli_alloc(struct d40_desc *d40d, int lli_len, bool is_log) { @@ -695,8 +701,7 @@ static void d40_term_all(struct d40_chan *d40c) static void __d40_config_set_event(struct d40_chan *d40c, bool enable, u32 event, int reg) { - void __iomem *addr = d40c->base->virtbase + D40_DREG_PCBASE - + d40c->phy_chan->num * D40_DREG_PCDELTA + reg; + void __iomem *addr = chan_base(d40c) + reg; int tries; if (!enable) { @@ -755,15 +760,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) static u32 d40_chan_has_events(struct d40_chan *d40c) { + void __iomem *chanbase = chan_base(d40c); u32 val; - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); + val = readl(chanbase + D40_CHAN_REG_SSLNK); + val |= readl(chanbase + D40_CHAN_REG_SDLNK); - val |= readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK); return val; } @@ -810,29 +812,17 @@ static void d40_config_write(struct d40_chan *d40c) writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); if (d40c->log_num != D40_PHY_CHAN) { + int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) + & D40_SREG_ELEM_LOG_LIDX_MASK; + void __iomem *chanbase = chan_base(d40c); + /* Set default config for CFG reg */ - writel(d40c->src_def_cfg, - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSCFG); - writel(d40c->dst_def_cfg, - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDCFG); + writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); + writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); /* Set LIDX for lcla */ - writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & - D40_SREG_ELEM_LOG_LIDX_MASK, - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDELT); - - writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & - D40_SREG_ELEM_LOG_LIDX_MASK, - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * 
D40_DREG_PCDELTA + - D40_CHAN_REG_SSELT); - + writel(lidx, chanbase + D40_CHAN_REG_SSELT); + writel(lidx, chanbase + D40_CHAN_REG_SDELT); } } @@ -843,12 +833,12 @@ static u32 d40_residue(struct d40_chan *d40c) if (d40c->log_num != D40_PHY_CHAN) num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) >> D40_MEM_LCSP2_ECNT_POS; - else - num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDELT) & - D40_SREG_ELEM_PHY_ECNT_MASK) >> - D40_SREG_ELEM_PHY_ECNT_POS; + else { + u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); + num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) + >> D40_SREG_ELEM_PHY_ECNT_POS; + } + return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); } @@ -859,10 +849,9 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) if (d40c->log_num != D40_PHY_CHAN) is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; else - is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK) & - D40_SREG_LNK_PHYS_LNK_MASK; + is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) + & D40_SREG_LNK_PHYS_LNK_MASK; + return is_link; } @@ -1550,6 +1539,7 @@ static int d40_free_dma(struct d40_chan *d40c) static bool d40_is_paused(struct d40_chan *d40c) { + void __iomem *chanbase = chan_base(d40c); bool is_paused = false; unsigned long flags; void __iomem *active_reg; @@ -1576,14 +1566,10 @@ static bool d40_is_paused(struct d40_chan *d40c) if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); - status = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK); + status = readl(chanbase + D40_CHAN_REG_SDLNK); } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); - status = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); + status = readl(chanbase + D40_CHAN_REG_SSLNK); } else { dev_err(&d40c->chan.dev->device, "[%s] Unknown direction\n", __func__); -- cgit v1.2.3 From 724a8577d80c6f8e9ac680be1cf419eddbd6f2a1 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:08 +0100 Subject: dma40: use helpers for channel type check The somewhat confusing check d40c->log_num == D40_PHY_CHAN and its variants are used in several places to check if a channel is logical or physical. Use appropriately named helpers to do this to make the code more readable. 
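To make the effect of this patch and the previous chan_base() helper concrete, here is a condensed before/after of a typical call site (illustrative only, not an actual hunk from the series):

	/* before: open-coded channel-type test and register offset */
	if (d40c->log_num != D40_PHY_CHAN)
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);

	/* after: the helpers name the intent and hide the address arithmetic */
	if (chan_is_logical(d40c))
		writel(d40c->src_def_cfg, chan_base(d40c) + D40_CHAN_REG_SSCFG);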
Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 54 +++++++++++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 3d4cea3cff3..0073988cdaf 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -306,6 +306,16 @@ static struct device *chan2dev(struct d40_chan *d40c) return &d40c->chan.dev->device; } +static bool chan_is_physical(struct d40_chan *chan) +{ + return chan->log_num == D40_PHY_CHAN; +} + +static bool chan_is_logical(struct d40_chan *chan) +{ + return !chan_is_physical(chan); +} + static void __iomem *chan_base(struct d40_chan *chan) { return chan->base->virtbase + D40_DREG_PCBASE + @@ -400,7 +410,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c, int i; int ret = -EINVAL; - if (d40c->log_num == D40_PHY_CHAN) + if (chan_is_physical(d40c)) return 0; spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); @@ -472,7 +482,7 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) { int curr_lcla = -EINVAL, next_lcla; - if (d40c->log_num == D40_PHY_CHAN) { + if (chan_is_physical(d40c)) { d40_phy_lli_write(d40c->base->virtbase, d40c->phy_chan->num, d40d->lli_phy.dst, @@ -788,7 +798,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c) = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, }; - if (d40c->log_num == D40_PHY_CHAN) + if (chan_is_physical(d40c)) return phy_map[d40c->dma_cfg.mode_opt]; else return log_map[d40c->dma_cfg.mode_opt]; @@ -802,7 +812,7 @@ static void d40_config_write(struct d40_chan *d40c) /* Odd addresses are even addresses + 4 */ addr_base = (d40c->phy_chan->num % 2) * 4; /* Setup channel mode to logical or physical */ - var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << + var = ((u32)(chan_is_logical(d40c)) + 1) << D40_CHAN_POS(d40c->phy_chan->num); writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); @@ -811,7 +821,7 @@ static void d40_config_write(struct d40_chan *d40c) writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & D40_SREG_ELEM_LOG_LIDX_MASK; void __iomem *chanbase = chan_base(d40c); @@ -830,7 +840,7 @@ static u32 d40_residue(struct d40_chan *d40c) { u32 num_elt; - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) >> D40_MEM_LCSP2_ECNT_POS; else { @@ -846,7 +856,7 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) { bool is_link; - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; else is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) @@ -869,7 +879,7 @@ static int d40_pause(struct dma_chan *chan) res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); if (res == 0) { - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { d40_config_set_event(d40c, false); /* Resume the other logical channels if any */ if (d40_chan_has_events(d40c)) @@ -895,7 +905,7 @@ static int d40_resume(struct dma_chan *chan) spin_lock_irqsave(&d40c->lock, flags); if (d40c->base->rev == 0) - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); goto no_suspend; @@ -904,7 +914,7 @@ static int d40_resume(struct dma_chan *chan) /* If bytes 
left to transfer or linked tx resume job */ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) d40_config_set_event(d40c, true); res = d40_channel_execute_command(d40c, D40_DMA_RUN); @@ -944,7 +954,7 @@ static int d40_start(struct d40_chan *d40c) if (d40c->base->rev == 0) { int err; - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); if (err) @@ -952,7 +962,7 @@ static int d40_start(struct d40_chan *d40c) } } - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) d40_config_set_event(d40c, true); return d40_channel_execute_command(d40c, D40_DMA_RUN); @@ -1495,7 +1505,7 @@ static int d40_free_dma(struct d40_chan *d40c) return res; } - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { /* Release logical channel, deactivate the event line */ d40_config_set_event(d40c, false); @@ -1548,7 +1558,7 @@ static bool d40_is_paused(struct d40_chan *d40c) spin_lock_irqsave(&d40c->lock, flags); - if (d40c->log_num == D40_PHY_CHAN) { + if (chan_is_physical(d40c)) { if (d40c->phy_chan->num % 2 == 0) active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; else @@ -1638,7 +1648,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->lli_current = 0; d40d->txd.flags = dma_flags; - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { dev_err(&d40c->chan.dev->device, @@ -1765,9 +1775,9 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) /* Fill in basic CFG register values */ d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, - &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); + &d40c->dst_def_cfg, chan_is_logical(d40c)); - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { d40_log_cfg(&d40c->dma_cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); @@ -1857,7 +1867,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40d->txd.tx_submit = d40_tx_submit; - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { dev_err(&d40c->chan.dev->device, @@ -2093,7 +2103,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, if (d40d == NULL) goto err; - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, direction, dma_flags); else @@ -2103,7 +2113,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, dev_err(&d40c->chan.dev->device, "[%s] Failed to prepare %s slave sg job: %d\n", __func__, - d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); + chan_is_logical(d40c) ? 
"log" : "phy", err); goto err; } @@ -2253,7 +2263,7 @@ static void d40_set_runtime_config(struct dma_chan *chan, return; } - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { if (config_maxburst >= 16) psize = STEDMA40_PSIZE_LOG_16; else if (config_maxburst >= 8) @@ -2286,7 +2296,7 @@ static void d40_set_runtime_config(struct dma_chan *chan, cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; /* Fill in register values */ - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); else d40_phy_cfg(cfg, &d40c->src_def_cfg, -- cgit v1.2.3 From 6db5a8ba11bf23d1618e392518f1684cbf2fe031 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:09 +0100 Subject: dma40: use helpers for error functions Almost every use of dev_err in this driver prints the function name. Abstract out wrappers to help with this and reduce code duplication. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 201 ++++++++++++++++++------------------------------ 1 file changed, 74 insertions(+), 127 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0073988cdaf..ab3ca6af7db 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -322,6 +322,12 @@ static void __iomem *chan_base(struct d40_chan *chan) chan->phy_chan->num * D40_DREG_PCDELTA; } +#define d40_err(dev, format, arg...) \ + dev_err(dev, "[%s] " format, __func__, ## arg) + +#define chan_err(d40c, format, arg...) \ + d40_err(chan2dev(d40c), format, ## arg) + static int d40_pool_lli_alloc(struct d40_desc *d40d, int lli_len, bool is_log) { @@ -673,9 +679,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c, } if (i == D40_SUSPEND_MAX_IT) { - dev_err(&d40c->chan.dev->device, - "[%s]: unable to suspend the chl %d (log: %d) status %x\n", - __func__, d40c->phy_chan->num, d40c->log_num, + chan_err(d40c, + "unable to suspend the chl %d (log: %d) status %x\n", + d40c->phy_chan->num, d40c->log_num, status); dump_stack(); ret = -EBUSY; @@ -1143,9 +1149,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) if (!il[row].is_error) dma_tc_handle(d40c); else - dev_err(base->dev, - "[%s] IRQ chan: %ld offset %d idx %d\n", - __func__, chan, il[row].offset, idx); + d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", + chan, il[row].offset, idx); spin_unlock(&d40c->lock); } @@ -1164,8 +1169,7 @@ static int d40_validate_conf(struct d40_chan *d40c, bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; if (!conf->dir) { - dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", - __func__); + chan_err(d40c, "Invalid direction.\n"); res = -EINVAL; } @@ -1173,46 +1177,40 @@ static int d40_validate_conf(struct d40_chan *d40c, d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && d40c->runtime_addr == 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Invalid TX channel address (%d)\n", - __func__, conf->dst_dev_type); + chan_err(d40c, "Invalid TX channel address (%d)\n", + conf->dst_dev_type); res = -EINVAL; } if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && d40c->runtime_addr == 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Invalid RX channel address (%d)\n", - __func__, conf->src_dev_type); + chan_err(d40c, "Invalid RX channel address (%d)\n", + conf->src_dev_type); res = -EINVAL; } if (conf->dir == STEDMA40_MEM_TO_PERIPH && dst_event_group == 
STEDMA40_DEV_DST_MEMORY) { - dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", - __func__); + chan_err(d40c, "Invalid dst\n"); res = -EINVAL; } if (conf->dir == STEDMA40_PERIPH_TO_MEM && src_event_group == STEDMA40_DEV_SRC_MEMORY) { - dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", - __func__); + chan_err(d40c, "Invalid src\n"); res = -EINVAL; } if (src_event_group == STEDMA40_DEV_SRC_MEMORY && dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { - dev_err(&d40c->chan.dev->device, - "[%s] No event line\n", __func__); + chan_err(d40c, "No event line\n"); res = -EINVAL; } if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && (src_event_group != dst_event_group)) { - dev_err(&d40c->chan.dev->device, - "[%s] Invalid event group\n", __func__); + chan_err(d40c, "Invalid event group\n"); res = -EINVAL; } @@ -1221,9 +1219,7 @@ static int d40_validate_conf(struct d40_chan *d40c, * DMAC HW supports it. Will be added to this driver, * in case any dma client requires it. */ - dev_err(&d40c->chan.dev->device, - "[%s] periph to periph not supported\n", - __func__); + chan_err(d40c, "periph to periph not supported\n"); res = -EINVAL; } @@ -1236,9 +1232,7 @@ static int d40_validate_conf(struct d40_chan *d40c, * src (burst x width) == dst (burst x width) */ - dev_err(&d40c->chan.dev->device, - "[%s] src (burst x width) != dst (burst x width)\n", - __func__); + chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); res = -EINVAL; } @@ -1441,8 +1435,7 @@ static int d40_config_memcpy(struct d40_chan *d40c) dma_has_cap(DMA_SLAVE, cap)) { d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; } else { - dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", - __func__); + chan_err(d40c, "No memcpy\n"); return -EINVAL; } @@ -1473,15 +1466,13 @@ static int d40_free_dma(struct d40_chan *d40c) } if (phy == NULL) { - dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", - __func__); + chan_err(d40c, "phy == null\n"); return -EINVAL; } if (phy->allocated_src == D40_ALLOC_FREE && phy->allocated_dst == D40_ALLOC_FREE) { - dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", - __func__); + chan_err(d40c, "channel already free\n"); return -EINVAL; } @@ -1493,15 +1484,13 @@ static int d40_free_dma(struct d40_chan *d40c) event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); is_src = true; } else { - dev_err(&d40c->chan.dev->device, - "[%s] Unknown direction\n", __func__); + chan_err(d40c, "Unknown direction\n"); return -EINVAL; } res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); if (res) { - dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", - __func__); + chan_err(d40c, "suspend failed\n"); return res; } @@ -1521,9 +1510,8 @@ static int d40_free_dma(struct d40_chan *d40c) res = d40_channel_execute_command(d40c, D40_DMA_RUN); if (res) { - dev_err(&d40c->chan.dev->device, - "[%s] Executing RUN command\n", - __func__); + chan_err(d40c, + "Executing RUN command\n"); return res; } } @@ -1536,8 +1524,7 @@ static int d40_free_dma(struct d40_chan *d40c) /* Release physical channel */ res = d40_channel_execute_command(d40c, D40_DMA_STOP); if (res) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to stop channel\n", __func__); + chan_err(d40c, "Failed to stop channel\n"); return res; } d40c->phy_chan = NULL; @@ -1581,8 +1568,7 @@ static bool d40_is_paused(struct d40_chan *d40c) event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); status = readl(chanbase + D40_CHAN_REG_SSLNK); } else { - dev_err(&d40c->chan.dev->device, - "[%s] Unknown direction\n", __func__); + chan_err(d40c, 
"Unknown direction\n"); goto _exit; } @@ -1625,8 +1611,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, unsigned long flags; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Unallocated channel.\n", __func__); + chan_err(d40c, "Unallocated channel.\n"); return ERR_PTR(-EINVAL); } @@ -1640,8 +1625,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.data_width); if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); + chan_err(d40c, "Unaligned size\n"); goto err; } @@ -1651,8 +1635,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); goto err; } @@ -1671,8 +1654,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40c->dma_cfg.src_info.data_width); } else { if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); goto err; } @@ -1758,9 +1740,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) if (!d40c->configured) { err = d40_config_memcpy(d40c); if (err) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to configure memcpy channel\n", - __func__); + chan_err(d40c, "Failed to configure memcpy channel\n"); goto fail; } } @@ -1768,8 +1748,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) err = d40_allocate_channel(d40c); if (err) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to allocate channel\n", __func__); + chan_err(d40c, "Failed to allocate channel\n"); goto fail; } @@ -1810,8 +1789,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) unsigned long flags; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Cannot free unallocated channel\n", __func__); + chan_err(d40c, "Cannot free unallocated channel\n"); return; } @@ -1821,8 +1799,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) err = d40_free_dma(d40c); if (err) - dev_err(&d40c->chan.dev->device, - "[%s] Failed to free channel\n", __func__); + chan_err(d40c, "Failed to free channel\n"); spin_unlock_irqrestore(&d40c->lock, flags); } @@ -1838,8 +1815,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, unsigned long flags; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Channel is not allocated.\n", __func__); + chan_err(d40c, "Channel is not allocated.\n"); return ERR_PTR(-EINVAL); } @@ -1847,8 +1823,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40d = d40_desc_get(d40c); if (d40d == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Descriptor is NULL\n", __func__); + chan_err(d40c, "Descriptor is NULL\n"); goto err; } @@ -1857,8 +1832,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.data_width); if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); + chan_err(d40c, "Unaligned size\n"); goto err; } @@ -1870,8 +1844,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { - 
dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); goto err; } d40d->lli_current = 0; @@ -1897,8 +1870,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, } else { if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); goto err; } @@ -1966,14 +1938,12 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.data_width); if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); + chan_err(d40c, "Unaligned size\n"); return -EINVAL; } if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); return -ENOMEM; } @@ -2022,14 +1992,12 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.data_width); if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); + chan_err(d40c, "Unaligned size\n"); return -EINVAL; } if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); return -ENOMEM; } @@ -2092,8 +2060,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, int err; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Cannot prepare unallocated channel\n", __func__); + chan_err(d40c, "Cannot prepare unallocated channel\n"); return ERR_PTR(-EINVAL); } @@ -2110,9 +2077,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, direction, dma_flags); if (err) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to prepare %s slave sg job: %d\n", - __func__, + chan_err(d40c, "Failed to prepare %s slave sg job: %d\n", chan_is_logical(d40c) ? 
"log" : "phy", err); goto err; } @@ -2143,9 +2108,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, int ret; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Cannot read status of unallocated channel\n", - __func__); + chan_err(d40c, "Cannot read status of unallocated channel\n"); return -EINVAL; } @@ -2169,8 +2132,7 @@ static void d40_issue_pending(struct dma_chan *chan) unsigned long flags; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Channel is not allocated!\n", __func__); + chan_err(d40c, "Channel is not allocated!\n"); return; } @@ -2321,8 +2283,7 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Channel is not allocated!\n", __func__); + chan_err(d40c, "Channel is not allocated!\n"); return -EINVAL; } @@ -2404,9 +2365,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, err = dma_async_device_register(&base->dma_slave); if (err) { - dev_err(base->dev, - "[%s] Failed to register slave channels\n", - __func__); + d40_err(base->dev, "Failed to register slave channels\n"); goto failure1; } @@ -2435,9 +2394,8 @@ static int __init d40_dmaengine_init(struct d40_base *base, err = dma_async_device_register(&base->dma_memcpy); if (err) { - dev_err(base->dev, - "[%s] Failed to regsiter memcpy only channels\n", - __func__); + d40_err(base->dev, + "Failed to regsiter memcpy only channels\n"); goto failure2; } @@ -2462,9 +2420,8 @@ static int __init d40_dmaengine_init(struct d40_base *base, err = dma_async_device_register(&base->dma_both); if (err) { - dev_err(base->dev, - "[%s] Failed to register logical and physical capable channels\n", - __func__); + d40_err(base->dev, + "Failed to register logical and physical capable channels\n"); goto failure3; } return 0; @@ -2566,8 +2523,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { - dev_err(&pdev->dev, "[%s] No matching clock found\n", - __func__); + d40_err(&pdev->dev, "No matching clock found\n"); goto failure; } @@ -2590,9 +2546,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { if (dma_id_regs[i].val != readl(virtbase + dma_id_regs[i].reg)) { - dev_err(&pdev->dev, - "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", - __func__, + d40_err(&pdev->dev, + "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", dma_id_regs[i].val, dma_id_regs[i].reg, readl(virtbase + dma_id_regs[i].reg)); @@ -2605,9 +2560,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != D40_HW_DESIGNER) { - dev_err(&pdev->dev, - "[%s] Unknown designer! Got %x wanted %x\n", - __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK, + d40_err(&pdev->dev, "Unknown designer! 
Got %x wanted %x\n", + val & D40_DREG_PERIPHID2_DESIGNER_MASK, D40_HW_DESIGNER); goto failure; } @@ -2637,7 +2591,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) sizeof(struct d40_chan), GFP_KERNEL); if (base == NULL) { - dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); + d40_err(&pdev->dev, "Out of memory\n"); goto failure; } @@ -2809,9 +2763,8 @@ static int __init d40_lcla_allocate(struct d40_base *base) base->lcla_pool.pages); if (!page_list[i]) { - dev_err(base->dev, - "[%s] Failed to allocate %d pages.\n", - __func__, base->lcla_pool.pages); + d40_err(base->dev, "Failed to allocate %d pages.\n", + base->lcla_pool.pages); for (j = 0; j < i; j++) free_pages(page_list[j], base->lcla_pool.pages); @@ -2881,9 +2834,7 @@ static int __init d40_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); if (!res) { ret = -ENOENT; - dev_err(&pdev->dev, - "[%s] No \"lcpa\" memory resource\n", - __func__); + d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); goto failure; } base->lcpa_size = resource_size(res); @@ -2892,9 +2843,9 @@ static int __init d40_probe(struct platform_device *pdev) if (request_mem_region(res->start, resource_size(res), D40_NAME " I/O lcpa") == NULL) { ret = -EBUSY; - dev_err(&pdev->dev, - "[%s] Failed to request LCPA region 0x%x-0x%x\n", - __func__, res->start, res->end); + d40_err(&pdev->dev, + "Failed to request LCPA region 0x%x-0x%x\n", + res->start, res->end); goto failure; } @@ -2910,16 +2861,13 @@ static int __init d40_probe(struct platform_device *pdev) base->lcpa_base = ioremap(res->start, resource_size(res)); if (!base->lcpa_base) { ret = -ENOMEM; - dev_err(&pdev->dev, - "[%s] Failed to ioremap LCPA region\n", - __func__); + d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); goto failure; } ret = d40_lcla_allocate(base); if (ret) { - dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", - __func__); + d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); goto failure; } @@ -2928,9 +2876,8 @@ static int __init d40_probe(struct platform_device *pdev) base->irq = platform_get_irq(pdev, 0); ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); - if (ret) { - dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); + d40_err(&pdev->dev, "No IRQ defined\n"); goto failure; } @@ -2973,7 +2920,7 @@ failure: kfree(base); } - dev_err(&pdev->dev, "[%s] probe failed\n", __func__); + d40_err(&pdev->dev, "probe failed\n"); return ret; } -- cgit v1.2.3 From 4d5949009e585b2bcf09dc4de625351f987a1e6d Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:10 +0100 Subject: dma40: fix comment to refer to SOCs rather than boards And add DB8500v2 information. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index ab3ca6af7db..0faae662ad1 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2497,9 +2497,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, /* * D40_DREG_PERIPHID2 Depends on HW revision: - * MOP500/HREF ED has 0x0008, + * DB8500ed has 0x0008, * ? 
has 0x0018, - * HREF V1 has 0x0028 + * DB8500v1 has 0x0028 + * DB8500v2 has 0x0038 */ { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, -- cgit v1.2.3 From ac2c0a387194f45c759572b3462d1bf92ec92f00 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:11 +0100 Subject: dma40: allow realtime and priority for event lines DB8500v2's DMA40 (revision 3) allows setting event lines as high priority and real time. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- arch/arm/plat-nomadik/include/plat/ste_dma40.h | 3 +++ drivers/dma/ste_dma40.c | 34 ++++++++++++++++++++++++++ drivers/dma/ste_dma40_ll.h | 16 ++++++++++++ 3 files changed, 53 insertions(+) diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h index 4d6dd4c39b7..8358f857a29 100644 --- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h @@ -104,6 +104,8 @@ struct stedma40_half_channel_info { * * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH * @high_priority: true if high-priority + * @realtime: true if realtime mode is to be enabled. Only available on DMA40 + * version 3+, i.e DB8500v2+ * @mode: channel mode: physical, logical, or operation * @mode_opt: options for the chosen channel mode * @src_dev_type: Src device type @@ -119,6 +121,7 @@ struct stedma40_half_channel_info { struct stedma40_chan_cfg { enum stedma40_xfer_dir dir; bool high_priority; + bool realtime; enum stedma40_mode mode; enum stedma40_mode_opt mode_opt; int src_dev_type; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0faae662ad1..ce551622158 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1724,6 +1724,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data) } EXPORT_SYMBOL(stedma40_filter); +static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) +{ + bool realtime = d40c->dma_cfg.realtime; + bool highprio = d40c->dma_cfg.high_priority; + u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; + u32 rtreg = realtime ? 
D40_DREG_RSEG1 : D40_DREG_RCEG1; + u32 event = D40_TYPE_TO_EVENT(dev_type); + u32 group = D40_TYPE_TO_GROUP(dev_type); + u32 bit = 1 << event; + + /* Destination event lines are stored in the upper halfword */ + if (!src) + bit <<= 16; + + writel(bit, d40c->base->virtbase + prioreg + group * 4); + writel(bit, d40c->base->virtbase + rtreg + group * 4); +} + +static void d40_set_prio_realtime(struct d40_chan *d40c) +{ + if (d40c->base->rev < 3) + return; + + if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || + (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) + __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); + + if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || + (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) + __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); +} + /* DMA ENGINE functions */ static int d40_alloc_chan_resources(struct dma_chan *chan) { @@ -1756,6 +1788,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg, chan_is_logical(d40c)); + d40_set_prio_realtime(d40c); + if (chan_is_logical(d40c)) { d40_log_cfg(&d40c->dma_cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 9cc43495bea..e93f394187a 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -163,6 +163,22 @@ #define D40_DREG_LCEIS1 0x0B4 #define D40_DREG_LCEIS2 0x0B8 #define D40_DREG_LCEIS3 0x0BC +#define D40_DREG_PSEG1 0x110 +#define D40_DREG_PSEG2 0x114 +#define D40_DREG_PSEG3 0x118 +#define D40_DREG_PSEG4 0x11C +#define D40_DREG_PCEG1 0x120 +#define D40_DREG_PCEG2 0x124 +#define D40_DREG_PCEG3 0x128 +#define D40_DREG_PCEG4 0x12C +#define D40_DREG_RSEG1 0x130 +#define D40_DREG_RSEG2 0x134 +#define D40_DREG_RSEG3 0x138 +#define D40_DREG_RSEG4 0x13C +#define D40_DREG_RCEG1 0x140 +#define D40_DREG_RCEG2 0x144 +#define D40_DREG_RCEG3 0x148 +#define D40_DREG_RCEG4 0x14C #define D40_DREG_STFU 0xFC8 #define D40_DREG_ICFG 0xFCC #define D40_DREG_PERIPHID0 0xFE0 -- cgit v1.2.3 From 594ece4dc0e8ac222945ef2048430600ad3c7644 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:12 +0100 Subject: dma40: remove unnecessary ALIGN()s ALIGN(x * y, y) == x * y ALIGN(aligned + x * y, y) == aligned + x * y Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index ce551622158..d32a9ac8608 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -344,7 +344,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); d40d->lli_pool.base = NULL; } else { - d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); + d40d->lli_pool.size = lli_len * 2 * align; base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); d40d->lli_pool.base = base; @@ -356,13 +356,11 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, if (is_log) { d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, align); - d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, - align); + d40d->lli_log.dst = d40d->lli_log.src + lli_len; } else { d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, align); - d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, - align); + d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; } return 0; -- cgit v1.2.3 
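The identities quoted in the patch above follow directly from the kernel's power-of-two ALIGN(); a short worked example, assuming (as the driver does here) that the lli alignment is a power of two:

	/*
	 * ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1)
	 *
	 *   ALIGN(3 * 8, 8)      == (24 + 7) & ~7 == 24 == 3 * 8
	 *   ALIGN(16 + 3 * 8, 8) == (40 + 7) & ~7 == 40 == 16 + 3 * 8
	 *
	 * A value that is already a multiple of the alignment is unchanged, so
	 * the dropped ALIGN()/PTR_ALIGN() calls were no-ops.
	 */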
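Returning to the "allow realtime and priority for event lines" patch earlier in this series: a client opts in through its channel configuration. A hypothetical sketch (values made up; the usual dev_type and data-width fields are omitted):

	struct stedma40_chan_cfg cfg = {
		.dir		= STEDMA40_PERIPH_TO_MEM,
		.mode		= STEDMA40_MODE_LOGICAL,
		.high_priority	= true,
		.realtime	= true,	/* honoured only on DMA40 rev 3+, i.e. DB8500v2+ */
	};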
From 7fe8be5a74eb058b0b48970caef83e0215f55944 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:13 +0100 Subject: dma40: use sg_dma_address() instead of sg_phys() The address to use for DMA should be taken from sg_dma_address() and not sg_phys(). Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40_ll.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 0b096a38322..6f03f580c49 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -272,7 +272,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, if (target) dst = target; else - dst = sg_phys(current_sg); + dst = sg_dma_address(current_sg); l_phys = ALIGN(lli_phys + (lli - lli_sg) * sizeof(struct d40_phy_lli), D40_LLI_ALIGN); @@ -416,7 +416,7 @@ int d40_log_sg_to_dev(struct scatterlist *sg, if (direction == DMA_TO_DEVICE) { lli_src = d40_log_buf_to_lli(lli_src, - sg_phys(current_sg), + sg_dma_address(current_sg), sg_dma_len(current_sg), lcsp->lcsp1, src_data_width, dst_data_width, @@ -431,7 +431,7 @@ int d40_log_sg_to_dev(struct scatterlist *sg, } else { lli_dst = d40_log_buf_to_lli(lli_dst, - sg_phys(current_sg), + sg_dma_address(current_sg), sg_dma_len(current_sg), lcsp->lcsp3, dst_data_width, src_data_width, @@ -491,7 +491,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg, for_each_sg(sg, current_sg, sg_len, i) { total_size += sg_dma_len(current_sg); lli = d40_log_buf_to_lli(lli, - sg_phys(current_sg), + sg_dma_address(current_sg), sg_dma_len(current_sg), lcsp13, data_width1, data_width2, true); -- cgit v1.2.3 From 026cbc424a162e495ad29e91d354fb8fc2da2657 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:14 +0100 Subject: dma40: fix DMA API usage for LCLA Map the buffer once and use dma_sync*() appropriately instead of mapping the buffer over and over without unmapping it. 
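The replacement pattern is the standard streaming DMA API usage: map the backing buffer once, sync the region that was just written before handing it to the device, and unmap at teardown. A minimal sketch with hypothetical dev/buf/offset names:

	#include <linux/dma-mapping.h>

	/* map once at setup */
	dma_addr_t dma = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* after the CPU writes a descriptor into buf at 'offset' */
	dma_sync_single_range_for_device(dev, dma, offset, desc_size,
					 DMA_TO_DEVICE);

	/* at teardown */
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);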
Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d32a9ac8608..f08e5c49c5d 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -128,6 +128,7 @@ struct d40_desc { */ struct d40_lcla_pool { void *base; + dma_addr_t dma_addr; void *base_unaligned; int pages; spinlock_t lock; @@ -504,25 +505,25 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) d40d->lli_current++; for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { - struct d40_log_lli *lcla; + unsigned int lcla_offset = d40c->phy_chan->num * 1024 + + 8 * curr_lcla * 2; + struct d40_lcla_pool *pool = &d40c->base->lcla_pool; + struct d40_log_lli *lcla = pool->base + lcla_offset; if (d40d->lli_current + 1 < d40d->lli_len) next_lcla = d40_lcla_alloc_one(d40c, d40d); else next_lcla = -EINVAL; - lcla = d40c->base->lcla_pool.base + - d40c->phy_chan->num * 1024 + - 8 * curr_lcla * 2; - d40_log_lli_lcla_write(lcla, &d40d->lli_log.dst[d40d->lli_current], &d40d->lli_log.src[d40d->lli_current], next_lcla); - (void) dma_map_single(d40c->base->dev, lcla, - 2 * sizeof(struct d40_log_lli), - DMA_TO_DEVICE); + dma_sync_single_range_for_device(d40c->base->dev, + pool->dma_addr, lcla_offset, + 2 * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); curr_lcla = next_lcla; @@ -2771,6 +2772,7 @@ static void __init d40_hw_init(struct d40_base *base) static int __init d40_lcla_allocate(struct d40_base *base) { + struct d40_lcla_pool *pool = &base->lcla_pool; unsigned long *page_list; int i, j; int ret = 0; @@ -2835,6 +2837,15 @@ static int __init d40_lcla_allocate(struct d40_base *base) LCLA_ALIGNMENT); } + pool->dma_addr = dma_map_single(base->dev, pool->base, + SZ_1K * base->num_phy_chans, + DMA_TO_DEVICE); + if (dma_mapping_error(base->dev, pool->dma_addr)) { + pool->dma_addr = 0; + ret = -ENOMEM; + goto failure; + } + writel(virt_to_phys(base->lcla_pool.base), base->virtbase + D40_DREG_LCLA); failure: @@ -2929,6 +2940,12 @@ failure: kmem_cache_destroy(base->desc_slab); if (base->virtbase) iounmap(base->virtbase); + + if (base->lcla_pool.dma_addr) + dma_unmap_single(base->dev, base->lcla_pool.dma_addr, + SZ_1K * base->num_phy_chans, + DMA_TO_DEVICE); + if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) free_pages((unsigned long)base->lcla_pool.base, base->lcla_pool.pages); -- cgit v1.2.3 From b00f938c8cf5ba8e7a692519548a256aa3ea1203 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:15 +0100 Subject: dma40: fix DMA API usage for LLIs Map and unmap the LLIs and use dma_sync_single_for_device() appropriately instead of mapping and never unmapping them. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 58 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 17 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index f08e5c49c5d..b5856864d48 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -68,6 +68,7 @@ enum d40_command { * @base: Pointer to memory area when the pre_alloc_lli's are not large * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if * pre_alloc_lli is used. 
+ * @dma_addr: DMA address, if mapped * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. * @pre_alloc_lli: Pre allocated area for the most common case of transfers, * one buffer to one buffer. @@ -75,6 +76,7 @@ enum d40_command { struct d40_lli_pool { void *base; int size; + dma_addr_t dma_addr; /* Space for dst and src, plus an extra for padding */ u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; }; @@ -329,7 +331,7 @@ static void __iomem *chan_base(struct d40_chan *chan) #define chan_err(d40c, format, arg...) \ d40_err(chan2dev(d40c), format, ## arg) -static int d40_pool_lli_alloc(struct d40_desc *d40d, +static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, int lli_len, bool is_log) { u32 align; @@ -358,17 +360,36 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, align); d40d->lli_log.dst = d40d->lli_log.src + lli_len; + + d40d->lli_pool.dma_addr = 0; } else { d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, align); d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; + + d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, + d40d->lli_phy.src, + d40d->lli_pool.size, + DMA_TO_DEVICE); + + if (dma_mapping_error(d40c->base->dev, + d40d->lli_pool.dma_addr)) { + kfree(d40d->lli_pool.base); + d40d->lli_pool.base = NULL; + d40d->lli_pool.dma_addr = 0; + return -ENOMEM; + } } return 0; } -static void d40_pool_lli_free(struct d40_desc *d40d) +static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) { + if (d40d->lli_pool.dma_addr) + dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, + d40d->lli_pool.size, DMA_TO_DEVICE); + kfree(d40d->lli_pool.base); d40d->lli_pool.base = NULL; d40d->lli_pool.size = 0; @@ -454,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) list_for_each_entry_safe(d, _d, &d40c->client, node) if (async_tx_test_ack(&d->txd)) { - d40_pool_lli_free(d); + d40_pool_lli_free(d40c, d); d40_desc_remove(d); desc = d; memset(desc, 0, sizeof(*desc)); @@ -474,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) { + d40_pool_lli_free(d40c, d40d); d40_lcla_free_all(d40c, d40d); kmem_cache_free(d40c->base->desc_slab, d40d); } @@ -1063,7 +1085,7 @@ static void dma_tasklet(unsigned long data) callback_param = d40d->txd.callback_param; if (async_tx_test_ack(&d40d->txd)) { - d40_pool_lli_free(d40d); + d40_pool_lli_free(d40c, d40d); d40_desc_remove(d40d); d40_desc_free(d40c, d40d); } else { @@ -1459,7 +1481,7 @@ static int d40_free_dma(struct d40_chan *d40c) /* Release client owned descriptors */ if (!list_empty(&d40c->client)) list_for_each_entry_safe(d, _d, &d40c->client, node) { - d40_pool_lli_free(d); + d40_pool_lli_free(d40c, d); d40_desc_remove(d); d40_desc_free(d40c, d); } @@ -1633,7 +1655,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, if (chan_is_logical(d40c)) { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { chan_err(d40c, "Out of memory\n"); goto err; } @@ -1652,7 +1674,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40c->dma_cfg.dst_info.data_width, d40c->dma_cfg.src_info.data_width); } else { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { chan_err(d40c, "Out of memory\n"); goto err; } @@ -1683,8 +1705,9 @@ 
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, if (res < 0) goto err; - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, - d40d->lli_pool.size, DMA_TO_DEVICE); + dma_sync_single_for_device(d40c->base->dev, + d40d->lli_pool.dma_addr, + d40d->lli_pool.size, DMA_TO_DEVICE); } dma_async_tx_descriptor_init(&d40d->txd, chan); @@ -1876,7 +1899,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, if (chan_is_logical(d40c)) { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if (d40_pool_lli_alloc(d40c,d40d, d40d->lli_len, true) < 0) { chan_err(d40c, "Out of memory\n"); goto err; } @@ -1902,7 +1925,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, } else { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { chan_err(d40c, "Out of memory\n"); goto err; } @@ -1931,8 +1954,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, false) == NULL) goto err; - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, - d40d->lli_pool.size, DMA_TO_DEVICE); + dma_sync_single_for_device(d40c->base->dev, + d40d->lli_pool.dma_addr, + d40d->lli_pool.size, DMA_TO_DEVICE); } spin_unlock_irqrestore(&d40c->lock, flags); @@ -1975,7 +1999,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, return -EINVAL; } - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { chan_err(d40c, "Out of memory\n"); return -ENOMEM; } @@ -2029,7 +2053,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, return -EINVAL; } - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { chan_err(d40c, "Out of memory\n"); return -ENOMEM; } @@ -2075,8 +2099,8 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, if (res < 0) return res; - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, - d40d->lli_pool.size, DMA_TO_DEVICE); + dma_sync_single_for_device(d40c->base->dev, d40d->lli_pool.dma_addr, + d40d->lli_pool.size, DMA_TO_DEVICE); return 0; } -- cgit v1.2.3 From d924abad7fa9a78d70b20552bf27fe4f7a19a2fb Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:16 +0100 Subject: dma40: remove unnecessary casts Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index b5856864d48..bd72269dac0 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -357,14 +357,12 @@ static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, } if (is_log) { - d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, - align); + d40d->lli_log.src = PTR_ALIGN(base, align); d40d->lli_log.dst = d40d->lli_log.src + lli_len; d40d->lli_pool.dma_addr = 0; } else { - d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, - align); + d40d->lli_phy.src = PTR_ALIGN(base, align); d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, -- cgit v1.2.3 From 95944c6ef5b5214508273992416adb836b63c73f Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:17 +0100 Subject: dma40: implement prep_memcpy as a wrapper around memcpy_sg To simplify the code. 
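The simplification rests on the observation that a memcpy is just a scatter/gather transfer with a single entry on each side, so the memcpy prep callback can build two one-entry scatterlists and delegate to the existing sg path. A rough sketch of that conversion (an illustrative helper, not the driver's code; the actual wrapper is in the diff below):

#include <linux/scatterlist.h>

/* Wrap a contiguous copy (dst, src and size are already a DMA address pair
 * and a byte count) into one-entry source/destination scatterlists. */
static void memcpy_to_single_sg(struct scatterlist *dst_sg,
				struct scatterlist *src_sg,
				dma_addr_t dst, dma_addr_t src, size_t size)
{
	sg_init_table(dst_sg, 1);
	sg_init_table(src_sg, 1);

	/* The addresses are DMA addresses, so fill the dma fields directly
	 * rather than going through sg_set_page()/dma_map_sg(). */
	sg_dma_address(dst_sg) = dst;
	sg_dma_address(src_sg) = src;
	sg_dma_len(dst_sg) = size;
	sg_dma_len(src_sg) = size;
}

With that in place, preparing a memcpy reduces to building the two lists on the stack and calling the sg-based prep routine, which is exactly the shape of d40_prep_memcpy() after this patch.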
Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 107 ++++----------------------------------------- drivers/dma/ste_dma40_ll.c | 2 +- drivers/dma/ste_dma40_ll.h | 11 ----- 3 files changed, 10 insertions(+), 110 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index bd72269dac0..0a20179349b 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1863,108 +1863,19 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, size_t size, unsigned long dma_flags) { - struct d40_desc *d40d; - struct d40_chan *d40c = container_of(chan, struct d40_chan, - chan); - unsigned long flags; - - if (d40c->phy_chan == NULL) { - chan_err(d40c, "Channel is not allocated.\n"); - return ERR_PTR(-EINVAL); - } - - spin_lock_irqsave(&d40c->lock, flags); - d40d = d40_desc_get(d40c); - - if (d40d == NULL) { - chan_err(d40c, "Descriptor is NULL\n"); - goto err; - } - - d40d->txd.flags = dma_flags; - d40d->lli_len = d40_size_2_dmalen(size, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - chan_err(d40c, "Unaligned size\n"); - goto err; - } - - - dma_async_tx_descriptor_init(&d40d->txd, chan); - - d40d->txd.tx_submit = d40_tx_submit; - - if (chan_is_logical(d40c)) { - - if (d40_pool_lli_alloc(d40c,d40d, d40d->lli_len, true) < 0) { - chan_err(d40c, "Out of memory\n"); - goto err; - } - d40d->lli_current = 0; - - if (d40_log_buf_to_lli(d40d->lli_log.src, - src, - size, - d40c->log_def.lcsp1, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - true) == NULL) - goto err; - - if (d40_log_buf_to_lli(d40d->lli_log.dst, - dst, - size, - d40c->log_def.lcsp3, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - true) == NULL) - goto err; - - } else { - - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { - chan_err(d40c, "Out of memory\n"); - goto err; - } - - if (d40_phy_buf_to_lli(d40d->lli_phy.src, - src, - size, - d40c->dma_cfg.src_info.psize, - 0, - d40c->src_def_cfg, - true, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - false) == NULL) - goto err; + struct scatterlist dst_sg; + struct scatterlist src_sg; - if (d40_phy_buf_to_lli(d40d->lli_phy.dst, - dst, - size, - d40c->dma_cfg.dst_info.psize, - 0, - d40c->dst_def_cfg, - true, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - false) == NULL) - goto err; + sg_init_table(&dst_sg, 1); + sg_init_table(&src_sg, 1); - dma_sync_single_for_device(d40c->base->dev, - d40d->lli_pool.dma_addr, - d40d->lli_pool.size, DMA_TO_DEVICE); - } + sg_dma_address(&dst_sg) = dst; + sg_dma_address(&src_sg) = src; - spin_unlock_irqrestore(&d40c->lock, flags); - return &d40d->txd; + sg_dma_len(&dst_sg) = size; + sg_dma_len(&src_sg) = size; -err: - if (d40d) - d40_desc_free(d40c, d40d); - spin_unlock_irqrestore(&d40c->lock, flags); - return NULL; + return stedma40_memcpy_sg(chan, &dst_sg, &src_sg, 1, dma_flags); } static struct dma_async_tx_descriptor * diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 6f03f580c49..552c5972c75 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -198,7 +198,7 @@ static int d40_seg_size(int size, int data_width1, int data_width2) return seg_max; } -struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, +static struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, 
dma_addr_t addr, u32 size, int psize, diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index e93f394187a..a5d71714ebf 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -312,17 +312,6 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, u32 data_width2, int psize); -struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, - dma_addr_t data, - u32 data_size, - int psize, - dma_addr_t next_lli, - u32 reg_cfg, - bool term_int, - u32 data_width1, - u32 data_width2, - bool is_device); - void d40_phy_lli_write(void __iomem *virtbase, u32 phy_chan_num, struct d40_phy_lli *lli_dst, -- cgit v1.2.3 From 5f81158f90db4bc8a79e91736aa3afce8e590e46 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:18 +0100 Subject: dma40: combine desc init functions The desc init code can be shared between the mem and slave prep routines. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 76 +++++++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 44 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0a20179349b..5259a983243 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1617,6 +1617,35 @@ static u32 stedma40_residue(struct dma_chan *chan) return bytes_left; } +static struct d40_desc * +d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, + unsigned int sg_len, unsigned long dma_flags) +{ + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + struct d40_desc *desc; + + desc = d40_desc_get(chan); + if (!desc) + return NULL; + + desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, + cfg->dst_info.data_width); + if (desc->lli_len < 0) { + chan_err(chan, "Unaligned size\n"); + d40_desc_free(chan, desc); + + return NULL; + } + + desc->lli_current = 0; + desc->txd.flags = dma_flags; + desc->txd.tx_submit = d40_tx_submit; + + dma_async_tx_descriptor_init(&desc->txd, &chan->chan); + + return desc; +} + struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, struct scatterlist *sgl_dst, struct scatterlist *sgl_src, @@ -1635,22 +1664,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, } spin_lock_irqsave(&d40c->lock, flags); - d40d = d40_desc_get(d40c); - if (d40d == NULL) + d40d = d40_prep_desc(d40c, sgl_dst, sgl_len, dma_flags); + if (!d40d) goto err; - d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - chan_err(d40c, "Unaligned size\n"); - goto err; - } - - d40d->lli_current = 0; - d40d->txd.flags = dma_flags; - if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { @@ -1708,10 +1726,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->lli_pool.size, DMA_TO_DEVICE); } - dma_async_tx_descriptor_init(&d40d->txd, chan); - - d40d->txd.tx_submit = d40_tx_submit; - spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; @@ -1900,21 +1914,11 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, dma_addr_t dev_addr = 0; int total_size; - d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - chan_err(d40c, "Unaligned size\n"); - return -EINVAL; - } - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { chan_err(d40c, "Out of 
memory\n"); return -ENOMEM; } - d40d->lli_current = 0; - if (direction == DMA_FROM_DEVICE) if (d40c->runtime_addr) dev_addr = d40c->runtime_addr; @@ -1954,21 +1958,11 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, dma_addr_t dst_dev_addr; int res; - d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - chan_err(d40c, "Unaligned size\n"); - return -EINVAL; - } - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { chan_err(d40c, "Out of memory\n"); return -ENOMEM; } - d40d->lli_current = 0; - if (direction == DMA_FROM_DEVICE) { dst_dev_addr = 0; if (d40c->runtime_addr) @@ -2031,8 +2025,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, } spin_lock_irqsave(&d40c->lock, flags); - d40d = d40_desc_get(d40c); + d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags); if (d40d == NULL) goto err; @@ -2048,12 +2042,6 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, goto err; } - d40d->txd.flags = dma_flags; - - dma_async_tx_descriptor_init(&d40d->txd, chan); - - d40d->txd.tx_submit = d40_tx_submit; - spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; -- cgit v1.2.3 From dbd887880320b6a56811bb38ff4ad888728c3a91 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:19 +0100 Subject: dma40: combine duplicated d40_pool_lli_alloc() calls Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 5259a983243..495f9eb0a4b 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -332,8 +332,9 @@ static void __iomem *chan_base(struct d40_chan *chan) d40_err(chan2dev(d40c), format, ## arg) static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, - int lli_len, bool is_log) + int lli_len) { + bool is_log = chan_is_logical(d40c); u32 align; void *base; @@ -1623,6 +1624,7 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, { struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct d40_desc *desc; + int ret; desc = d40_desc_get(chan); if (!desc) @@ -1632,11 +1634,16 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, cfg->dst_info.data_width); if (desc->lli_len < 0) { chan_err(chan, "Unaligned size\n"); - d40_desc_free(chan, desc); + goto err; + } - return NULL; + ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); + if (ret < 0) { + chan_err(chan, "Could not allocate lli\n"); + goto err; } + desc->lli_current = 0; desc->txd.flags = dma_flags; desc->txd.tx_submit = d40_tx_submit; @@ -1644,6 +1651,10 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, dma_async_tx_descriptor_init(&desc->txd, &chan->chan); return desc; + +err: + d40_desc_free(chan, desc); + return NULL; } struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, @@ -1670,12 +1681,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, goto err; if (chan_is_logical(d40c)) { - - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { - chan_err(d40c, "Out of memory\n"); - goto err; - } - (void) d40_log_sg_to_lli(sgl_src, sgl_len, d40d->lli_log.src, @@ -1690,11 +1695,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 
d40c->dma_cfg.dst_info.data_width, d40c->dma_cfg.src_info.data_width); } else { - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { - chan_err(d40c, "Out of memory\n"); - goto err; - } - res = d40_phy_sg_to_lli(sgl_src, sgl_len, 0, @@ -1914,11 +1914,6 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, dma_addr_t dev_addr = 0; int total_size; - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { - chan_err(d40c, "Out of memory\n"); - return -ENOMEM; - } - if (direction == DMA_FROM_DEVICE) if (d40c->runtime_addr) dev_addr = d40c->runtime_addr; @@ -1958,11 +1953,6 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, dma_addr_t dst_dev_addr; int res; - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { - chan_err(d40c, "Out of memory\n"); - return -ENOMEM; - } - if (direction == DMA_FROM_DEVICE) { dst_dev_addr = 0; if (d40c->runtime_addr) -- cgit v1.2.3 From 00ac0341486ffe212f45ff1fe0780d12a36fffde Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:20 +0100 Subject: dma40: remove duplicated dev addr code Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 66 ++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 495f9eb0a4b..65b5aad1fc4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1909,25 +1909,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, - unsigned long dma_flags) + dma_addr_t dev_addr) { - dma_addr_t dev_addr = 0; int total_size; - if (direction == DMA_FROM_DEVICE) - if (d40c->runtime_addr) - dev_addr = d40c->runtime_addr; - else - dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; - else if (direction == DMA_TO_DEVICE) - if (d40c->runtime_addr) - dev_addr = d40c->runtime_addr; - else - dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; - - else - return -EINVAL; - total_size = d40_log_sg_to_dev(sgl, sg_len, &d40d->lli_log, &d40c->log_def, @@ -1947,27 +1932,12 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, struct scatterlist *sgl, unsigned int sgl_len, enum dma_data_direction direction, - unsigned long dma_flags) + dma_addr_t dev_addr) { - dma_addr_t src_dev_addr; - dma_addr_t dst_dev_addr; + dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; + dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? 
dev_addr : 0; int res; - if (direction == DMA_FROM_DEVICE) { - dst_dev_addr = 0; - if (d40c->runtime_addr) - src_dev_addr = d40c->runtime_addr; - else - src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; - } else if (direction == DMA_TO_DEVICE) { - if (d40c->runtime_addr) - dst_dev_addr = d40c->runtime_addr; - else - dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; - src_dev_addr = 0; - } else - return -EINVAL; - res = d40_phy_sg_to_lli(sgl, sgl_len, src_dev_addr, @@ -1997,6 +1967,24 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, return 0; } +static dma_addr_t +d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) +{ + struct stedma40_platform_data *plat = chan->base->plat_data; + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + dma_addr_t addr; + + if (chan->runtime_addr) + return chan->runtime_addr; + + if (direction == DMA_FROM_DEVICE) + addr = plat->dev_rx[cfg->src_dev_type]; + else if (direction == DMA_TO_DEVICE) + addr = plat->dev_tx[cfg->dst_dev_type]; + + return addr; +} + static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, @@ -2006,6 +1994,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + dma_addr_t dev_addr; unsigned long flags; int err; @@ -2014,18 +2003,23 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, return ERR_PTR(-EINVAL); } + if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) + return NULL; + spin_lock_irqsave(&d40c->lock, flags); d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags); if (d40d == NULL) goto err; + dev_addr = d40_get_dev_addr(d40c, direction); + if (chan_is_logical(d40c)) err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, - direction, dma_flags); + direction, dev_addr); else err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, - direction, dma_flags); + direction, dev_addr); if (err) { chan_err(d40c, "Failed to prepare %s slave sg job: %d\n", chan_is_logical(d40c) ? 
"log" : "phy", err); -- cgit v1.2.3 From 3e3a0763e78b520dac5fde569c42664863336d94 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:21 +0100 Subject: dma40: combine mem and slave sg-to-lli functions Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 194 +++++++++++++++++++++--------------------------- 1 file changed, 84 insertions(+), 110 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 65b5aad1fc4..8c6abc23db8 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1618,6 +1618,81 @@ static u32 stedma40_residue(struct dma_chan *chan) return bytes_left; } +static int +d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, + struct scatterlist *sg_src, struct scatterlist *sg_dst, + unsigned int sg_len, enum dma_data_direction direction, + dma_addr_t dev_addr) +{ + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + struct stedma40_half_channel_info *src_info = &cfg->src_info; + struct stedma40_half_channel_info *dst_info = &cfg->dst_info; + + if (direction == DMA_NONE) { + /* memcpy */ + (void) d40_log_sg_to_lli(sg_src, sg_len, + desc->lli_log.src, + chan->log_def.lcsp1, + src_info->data_width, + dst_info->data_width); + + (void) d40_log_sg_to_lli(sg_dst, sg_len, + desc->lli_log.dst, + chan->log_def.lcsp3, + dst_info->data_width, + src_info->data_width); + } else { + unsigned int total_size; + + total_size = d40_log_sg_to_dev(sg_src, sg_len, + &desc->lli_log, + &chan->log_def, + src_info->data_width, + dst_info->data_width, + direction, dev_addr); + if (total_size < 0) + return -EINVAL; + } + + return 0; +} + +static int +d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, + struct scatterlist *sg_src, struct scatterlist *sg_dst, + unsigned int sg_len, enum dma_data_direction direction, + dma_addr_t dev_addr) +{ + dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; + dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? dev_addr : 0; + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + struct stedma40_half_channel_info *src_info = &cfg->src_info; + struct stedma40_half_channel_info *dst_info = &cfg->dst_info; + int ret; + + ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, + desc->lli_phy.src, + virt_to_phys(desc->lli_phy.src), + chan->src_def_cfg, + src_info->data_width, + dst_info->data_width, + src_info->psize); + + ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, + desc->lli_phy.dst, + virt_to_phys(desc->lli_phy.dst), + chan->dst_def_cfg, + dst_info->data_width, + src_info->data_width, + dst_info->psize); + + dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, + desc->lli_pool.size, DMA_TO_DEVICE); + + return ret < 0 ? 
ret : 0; +} + + static struct d40_desc * d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, unsigned int sg_len, unsigned long dma_flags) @@ -1663,7 +1738,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, unsigned int sgl_len, unsigned long dma_flags) { - int res; struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); @@ -1681,49 +1755,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, goto err; if (chan_is_logical(d40c)) { - (void) d40_log_sg_to_lli(sgl_src, - sgl_len, - d40d->lli_log.src, - d40c->log_def.lcsp1, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - - (void) d40_log_sg_to_lli(sgl_dst, - sgl_len, - d40d->lli_log.dst, - d40c->log_def.lcsp3, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width); + d40_prep_sg_log(d40c, d40d, sgl_src, sgl_dst, + sgl_len, DMA_NONE, 0); } else { - res = d40_phy_sg_to_lli(sgl_src, - sgl_len, - 0, - d40d->lli_phy.src, - virt_to_phys(d40d->lli_phy.src), - d40c->src_def_cfg, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.psize); - - if (res < 0) - goto err; - - res = d40_phy_sg_to_lli(sgl_dst, - sgl_len, - 0, - d40d->lli_phy.dst, - virt_to_phys(d40d->lli_phy.dst), - d40c->dst_def_cfg, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.psize); - - if (res < 0) - goto err; - - dma_sync_single_for_device(d40c->base->dev, - d40d->lli_pool.dma_addr, - d40d->lli_pool.size, DMA_TO_DEVICE); + d40_prep_sg_phy(d40c, d40d, sgl_src, sgl_dst, + sgl_len, DMA_NONE, 0); } spin_unlock_irqrestore(&d40c->lock, flags); @@ -1904,69 +1940,6 @@ d40_prep_sg(struct dma_chan *chan, return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); } -static int d40_prep_slave_sg_log(struct d40_desc *d40d, - struct d40_chan *d40c, - struct scatterlist *sgl, - unsigned int sg_len, - enum dma_data_direction direction, - dma_addr_t dev_addr) -{ - int total_size; - - total_size = d40_log_sg_to_dev(sgl, sg_len, - &d40d->lli_log, - &d40c->log_def, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - direction, - dev_addr); - - if (total_size < 0) - return -EINVAL; - - return 0; -} - -static int d40_prep_slave_sg_phy(struct d40_desc *d40d, - struct d40_chan *d40c, - struct scatterlist *sgl, - unsigned int sgl_len, - enum dma_data_direction direction, - dma_addr_t dev_addr) -{ - dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; - dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? 
dev_addr : 0; - int res; - - res = d40_phy_sg_to_lli(sgl, - sgl_len, - src_dev_addr, - d40d->lli_phy.src, - virt_to_phys(d40d->lli_phy.src), - d40c->src_def_cfg, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.psize); - if (res < 0) - return res; - - res = d40_phy_sg_to_lli(sgl, - sgl_len, - dst_dev_addr, - d40d->lli_phy.dst, - virt_to_phys(d40d->lli_phy.dst), - d40c->dst_def_cfg, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.psize); - if (res < 0) - return res; - - dma_sync_single_for_device(d40c->base->dev, d40d->lli_pool.dma_addr, - d40d->lli_pool.size, DMA_TO_DEVICE); - return 0; -} - static dma_addr_t d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) { @@ -2015,11 +1988,12 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, dev_addr = d40_get_dev_addr(d40c, direction); if (chan_is_logical(d40c)) - err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, - direction, dev_addr); + err = d40_prep_sg_log(d40c, d40d, sgl, NULL, + sg_len, direction, dev_addr); else - err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, - direction, dev_addr); + err = d40_prep_sg_phy(d40c, d40d, sgl, NULL, + sg_len, direction, dev_addr); + if (err) { chan_err(d40c, "Failed to prepare %s slave sg job: %d\n", chan_is_logical(d40c) ? "log" : "phy", err); -- cgit v1.2.3 From 10a946b3a4e1ad665a81981cbe33c3d3903cd7da Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:22 +0100 Subject: dma40: remove export of stedma40_memcpy_sg The dmaengine framework has the API for this now. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- arch/arm/plat-nomadik/include/plat/ste_dma40.h | 19 ------------------- drivers/dma/ste_dma40.c | 2 +- 2 files changed, 1 insertion(+), 20 deletions(-) diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h index 8358f857a29..c44886062f8 100644 --- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h @@ -171,25 +171,6 @@ struct stedma40_platform_data { bool stedma40_filter(struct dma_chan *chan, void *data); -/** - * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from - * scattergatter lists. - * - * @chan: dmaengine handle - * @sgl_dst: Destination scatter list - * @sgl_src: Source scatter list - * @sgl_len: The length of each scatterlist. Both lists must be of equal length - * and each element must match the corresponding element in the other scatter - * list. - * @flags: is actually enum dma_ctrl_flags. 
See dmaengine.h - */ - -struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, - struct scatterlist *sgl_dst, - struct scatterlist *sgl_src, - unsigned int sgl_len, - unsigned long flags); - /** * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave * (=device) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8c6abc23db8..0f5d61720ab 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1733,6 +1733,7 @@ err: } struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, +static struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, struct scatterlist *sgl_dst, struct scatterlist *sgl_src, unsigned int sgl_len, @@ -1771,7 +1772,6 @@ err: spin_unlock_irqrestore(&d40c->lock, flags); return NULL; } -EXPORT_SYMBOL(stedma40_memcpy_sg); bool stedma40_filter(struct dma_chan *chan, void *data) { -- cgit v1.2.3 From cade1d30b2e071a687011c2a38c03ed7187ec501 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:23 +0100 Subject: dma40: combine mem and slave prep_sg functions Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 156 +++++++++++++++++++----------------------------- 1 file changed, 62 insertions(+), 94 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0f5d61720ab..4e9d6c5a713 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1732,44 +1732,70 @@ err: return NULL; } -struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, -static struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, - struct scatterlist *sgl_dst, - struct scatterlist *sgl_src, - unsigned int sgl_len, - unsigned long dma_flags) +static dma_addr_t +d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) { - struct d40_desc *d40d; - struct d40_chan *d40c = container_of(chan, struct d40_chan, - chan); + struct stedma40_platform_data *plat = chan->base->plat_data; + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + dma_addr_t addr; + + if (chan->runtime_addr) + return chan->runtime_addr; + + if (direction == DMA_FROM_DEVICE) + addr = plat->dev_rx[cfg->src_dev_type]; + else if (direction == DMA_TO_DEVICE) + addr = plat->dev_tx[cfg->dst_dev_type]; + + return addr; +} + +static struct dma_async_tx_descriptor * +d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, + struct scatterlist *sg_dst, unsigned int sg_len, + enum dma_data_direction direction, unsigned long dma_flags) +{ + struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); + dma_addr_t dev_addr = 0; + struct d40_desc *desc; unsigned long flags; + int ret; - if (d40c->phy_chan == NULL) { - chan_err(d40c, "Unallocated channel.\n"); - return ERR_PTR(-EINVAL); + if (!chan->phy_chan) { + chan_err(chan, "Cannot prepare unallocated channel\n"); + return NULL; } - spin_lock_irqsave(&d40c->lock, flags); + spin_lock_irqsave(&chan->lock, flags); - d40d = d40_prep_desc(d40c, sgl_dst, sgl_len, dma_flags); - if (!d40d) + desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); + if (desc == NULL) goto err; - if (chan_is_logical(d40c)) { - d40_prep_sg_log(d40c, d40d, sgl_src, sgl_dst, - sgl_len, DMA_NONE, 0); - } else { - d40_prep_sg_phy(d40c, d40d, sgl_src, sgl_dst, - sgl_len, DMA_NONE, 0); + if (direction != DMA_NONE) + dev_addr = d40_get_dev_addr(chan, direction); + + if (chan_is_logical(chan)) + ret = d40_prep_sg_log(chan, 
desc, sg_src, sg_dst, + sg_len, direction, dev_addr); + else + ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, + sg_len, direction, dev_addr); + + if (ret) { + chan_err(chan, "Failed to prepare %s sg job: %d\n", + chan_is_logical(chan) ? "log" : "phy", ret); + goto err; } - spin_unlock_irqrestore(&d40c->lock, flags); + spin_unlock_irqrestore(&chan->lock, flags); + + return &desc->txd; - return &d40d->txd; err: - if (d40d) - d40_desc_free(d40c, d40d); - spin_unlock_irqrestore(&d40c->lock, flags); + if (desc) + d40_desc_free(chan, desc); + spin_unlock_irqrestore(&chan->lock, flags); return NULL; } @@ -1925,37 +1951,19 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, sg_dma_len(&dst_sg) = size; sg_dma_len(&src_sg) = size; - return stedma40_memcpy_sg(chan, &dst_sg, &src_sg, 1, dma_flags); + return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); } static struct dma_async_tx_descriptor * -d40_prep_sg(struct dma_chan *chan, - struct scatterlist *dst_sg, unsigned int dst_nents, - struct scatterlist *src_sg, unsigned int src_nents, - unsigned long dma_flags) +d40_prep_memcpy_sg(struct dma_chan *chan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned int src_nents, + unsigned long dma_flags) { if (dst_nents != src_nents) return NULL; - return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); -} - -static dma_addr_t -d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) -{ - struct stedma40_platform_data *plat = chan->base->plat_data; - struct stedma40_chan_cfg *cfg = &chan->dma_cfg; - dma_addr_t addr; - - if (chan->runtime_addr) - return chan->runtime_addr; - - if (direction == DMA_FROM_DEVICE) - addr = plat->dev_rx[cfg->src_dev_type]; - else if (direction == DMA_TO_DEVICE) - addr = plat->dev_tx[cfg->dst_dev_type]; - - return addr; + return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); } static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, @@ -1964,50 +1972,10 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, enum dma_data_direction direction, unsigned long dma_flags) { - struct d40_desc *d40d; - struct d40_chan *d40c = container_of(chan, struct d40_chan, - chan); - dma_addr_t dev_addr; - unsigned long flags; - int err; - - if (d40c->phy_chan == NULL) { - chan_err(d40c, "Cannot prepare unallocated channel\n"); - return ERR_PTR(-EINVAL); - } - if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) return NULL; - spin_lock_irqsave(&d40c->lock, flags); - - d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags); - if (d40d == NULL) - goto err; - - dev_addr = d40_get_dev_addr(d40c, direction); - - if (chan_is_logical(d40c)) - err = d40_prep_sg_log(d40c, d40d, sgl, NULL, - sg_len, direction, dev_addr); - else - err = d40_prep_sg_phy(d40c, d40d, sgl, NULL, - sg_len, direction, dev_addr); - - if (err) { - chan_err(d40c, "Failed to prepare %s slave sg job: %d\n", - chan_is_logical(d40c) ? 
"log" : "phy", err); - goto err; - } - - spin_unlock_irqrestore(&d40c->lock, flags); - return &d40d->txd; - -err: - if (d40d) - d40_desc_free(d40c, d40d); - spin_unlock_irqrestore(&d40c->lock, flags); - return NULL; + return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); } static enum dma_status d40_tx_status(struct dma_chan *chan, @@ -2267,7 +2235,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; base->dma_slave.device_free_chan_resources = d40_free_chan_resources; base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_sg; + base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; base->dma_slave.device_tx_status = d40_tx_status; base->dma_slave.device_issue_pending = d40_issue_pending; @@ -2291,7 +2259,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_sg; + base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; base->dma_memcpy.device_tx_status = d40_tx_status; base->dma_memcpy.device_issue_pending = d40_issue_pending; @@ -2322,7 +2290,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; base->dma_both.device_free_chan_resources = d40_free_chan_resources; base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_sg; + base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; base->dma_both.device_tx_status = d40_tx_status; base->dma_both.device_issue_pending = d40_issue_pending; -- cgit v1.2.3 From 1c4b0927feab41346b0be971e0287aaf46eba8e0 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:24 +0100 Subject: dma40: move lli_load to main source file These register writes are better placed in the main source file rather than ll.c. 
Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 22 ++++++++++++++++++---- drivers/dma/ste_dma40_ll.c | 26 -------------------------- drivers/dma/ste_dma40_ll.h | 5 ----- 3 files changed, 18 insertions(+), 35 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 4e9d6c5a713..6a7a00d5d68 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -504,15 +504,29 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) list_add_tail(&desc->node, &d40c->active); } +static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) +{ + struct d40_phy_lli *lli_dst = desc->lli_phy.dst; + struct d40_phy_lli *lli_src = desc->lli_phy.src; + void __iomem *base = chan_base(chan); + + writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); + writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); + writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); + writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); + + writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); + writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); + writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); + writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); +} + static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) { int curr_lcla = -EINVAL, next_lcla; if (chan_is_physical(d40c)) { - d40_phy_lli_write(d40c->base->virtbase, - d40c->phy_chan->num, - d40d->lli_phy.dst, - d40d->lli_phy.src); + d40_phy_lli_load(d40c, d40d); d40d->lli_current = d40d->lli_len; } else { diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 552c5972c75..fd752516259 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -295,32 +295,6 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, } -void d40_phy_lli_write(void __iomem *virtbase, - u32 phy_chan_num, - struct d40_phy_lli *lli_dst, - struct d40_phy_lli *lli_src) -{ - - writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG); - writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); - writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR); - writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK); - - writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG); - writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); - writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR); - writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK); - -} - /* DMA logical lli operations */ static void d40_log_lli_link(struct d40_log_lli *lli_dst, diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index a5d71714ebf..46578a661b2 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -312,11 +312,6 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, u32 data_width2, int psize); -void d40_phy_lli_write(void __iomem *virtbase, - u32 phy_chan_num, - struct d40_phy_lli *lli_dst, - struct d40_phy_lli *lli_src); - /* Logical channels */ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, -- cgit v1.2.3 From 
e24b36bdf873b4a64545fd66da13877214d235cf Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:25 +0100 Subject: dma40: combine duplicated code in log_sg_to_dev Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40_ll.c | 52 ++++++++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index fd752516259..fa6c3ab93fa 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -385,40 +385,34 @@ int d40_log_sg_to_dev(struct scatterlist *sg, struct d40_log_lli *lli_dst = lli->dst; for_each_sg(sg, current_sg, sg_len, i) { - total_size += sg_dma_len(current_sg); + dma_addr_t sg_addr = sg_dma_address(current_sg); + unsigned int len = sg_dma_len(current_sg); + dma_addr_t src; + dma_addr_t dst; + + total_size += len; if (direction == DMA_TO_DEVICE) { - lli_src = - d40_log_buf_to_lli(lli_src, - sg_dma_address(current_sg), - sg_dma_len(current_sg), - lcsp->lcsp1, src_data_width, - dst_data_width, - true); - lli_dst = - d40_log_buf_to_lli(lli_dst, - dev_addr, - sg_dma_len(current_sg), - lcsp->lcsp3, dst_data_width, - src_data_width, - false); + src = sg_addr; + dst = dev_addr; } else { - lli_dst = - d40_log_buf_to_lli(lli_dst, - sg_dma_address(current_sg), - sg_dma_len(current_sg), - lcsp->lcsp3, dst_data_width, - src_data_width, - true); - lli_src = - d40_log_buf_to_lli(lli_src, - dev_addr, - sg_dma_len(current_sg), - lcsp->lcsp1, src_data_width, - dst_data_width, - false); + src = dev_addr; + dst = sg_addr; } + + lli_src = d40_log_buf_to_lli(lli_src, src, len, + lcsp->lcsp1, + src_data_width, + dst_data_width, + src == sg_addr); + + lli_dst = d40_log_buf_to_lli(lli_dst, dst, len, + lcsp->lcsp3, + dst_data_width, + src_data_width, + dst == sg_addr); } + return total_size; } -- cgit v1.2.3 From 5ed04b8575cb22920b1333aeb55121339449048f Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:26 +0100 Subject: dma40: unify d40_log_sg_to_lli funcs for mem and slave Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 41 ++++++++++++------------------ drivers/dma/ste_dma40_ll.c | 63 +++++++++------------------------------------- drivers/dma/ste_dma40_ll.h | 10 +------- 3 files changed, 29 insertions(+), 85 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6a7a00d5d68..c597dba713b 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1638,37 +1638,28 @@ d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, unsigned int sg_len, enum dma_data_direction direction, dma_addr_t dev_addr) { + dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; + dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? 
dev_addr : 0; struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct stedma40_half_channel_info *src_info = &cfg->src_info; struct stedma40_half_channel_info *dst_info = &cfg->dst_info; + int ret; - if (direction == DMA_NONE) { - /* memcpy */ - (void) d40_log_sg_to_lli(sg_src, sg_len, - desc->lli_log.src, - chan->log_def.lcsp1, - src_info->data_width, - dst_info->data_width); - - (void) d40_log_sg_to_lli(sg_dst, sg_len, - desc->lli_log.dst, - chan->log_def.lcsp3, - dst_info->data_width, - src_info->data_width); - } else { - unsigned int total_size; + ret = d40_log_sg_to_lli(sg_src, sg_len, + src_dev_addr, + desc->lli_log.src, + chan->log_def.lcsp1, + src_info->data_width, + dst_info->data_width); - total_size = d40_log_sg_to_dev(sg_src, sg_len, - &desc->lli_log, - &chan->log_def, - src_info->data_width, - dst_info->data_width, - direction, dev_addr); - if (total_size < 0) - return -EINVAL; - } + ret = d40_log_sg_to_lli(sg_dst, sg_len, + dst_dev_addr, + desc->lli_log.dst, + chan->log_def.lcsp3, + dst_info->data_width, + src_info->data_width); - return 0; + return ret < 0 ? ret : 0; } static int diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index fa6c3ab93fa..9935c6dbcfe 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -369,53 +369,6 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, } -int d40_log_sg_to_dev(struct scatterlist *sg, - int sg_len, - struct d40_log_lli_bidir *lli, - struct d40_def_lcsp *lcsp, - u32 src_data_width, - u32 dst_data_width, - enum dma_data_direction direction, - dma_addr_t dev_addr) -{ - int total_size = 0; - struct scatterlist *current_sg = sg; - int i; - struct d40_log_lli *lli_src = lli->src; - struct d40_log_lli *lli_dst = lli->dst; - - for_each_sg(sg, current_sg, sg_len, i) { - dma_addr_t sg_addr = sg_dma_address(current_sg); - unsigned int len = sg_dma_len(current_sg); - dma_addr_t src; - dma_addr_t dst; - - total_size += len; - - if (direction == DMA_TO_DEVICE) { - src = sg_addr; - dst = dev_addr; - } else { - src = dev_addr; - dst = sg_addr; - } - - lli_src = d40_log_buf_to_lli(lli_src, src, len, - lcsp->lcsp1, - src_data_width, - dst_data_width, - src == sg_addr); - - lli_dst = d40_log_buf_to_lli(lli_dst, dst, len, - lcsp->lcsp3, - dst_data_width, - src_data_width, - dst == sg_addr); - } - - return total_size; -} - struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, dma_addr_t addr, int size, @@ -447,6 +400,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, int d40_log_sg_to_lli(struct scatterlist *sg, int sg_len, + dma_addr_t dev_addr, struct d40_log_lli *lli_sg, u32 lcsp13, /* src or dst*/ u32 data_width1, u32 data_width2) @@ -455,14 +409,21 @@ int d40_log_sg_to_lli(struct scatterlist *sg, struct scatterlist *current_sg = sg; int i; struct d40_log_lli *lli = lli_sg; + bool autoinc = !dev_addr; for_each_sg(sg, current_sg, sg_len, i) { + dma_addr_t sg_addr = sg_dma_address(current_sg); + unsigned int len = sg_dma_len(current_sg); + dma_addr_t addr = dev_addr ?: sg_addr; + total_size += sg_dma_len(current_sg); - lli = d40_log_buf_to_lli(lli, - sg_dma_address(current_sg), - sg_dma_len(current_sg), + + lli = d40_log_buf_to_lli(lli, addr, len, lcsp13, - data_width1, data_width2, true); + data_width1, + data_width2, + autoinc); } + return total_size; } diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 46578a661b2..867f23f6e09 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -321,17 +321,9 @@ struct d40_log_lli 
*d40_log_buf_to_lli(struct d40_log_lli *lli_sg, u32 data_width1, u32 data_width2, bool addr_inc); -int d40_log_sg_to_dev(struct scatterlist *sg, - int sg_len, - struct d40_log_lli_bidir *lli, - struct d40_def_lcsp *lcsp, - u32 src_data_width, - u32 dst_data_width, - enum dma_data_direction direction, - dma_addr_t dev_addr); - int d40_log_sg_to_lli(struct scatterlist *sg, int sg_len, + dma_addr_t dev_addr, struct d40_log_lli *lli_sg, u32 lcsp13, /* src or dst*/ u32 data_width1, u32 data_width2); -- cgit v1.2.3 From cc31b6f7949efd46c5f13d0758cf7b0bcb71fae2 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:27 +0100 Subject: dma40: pass the info pointer all the way to reduce argument count Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 8 ++------ drivers/dma/ste_dma40_ll.c | 42 ++++++++++++++++++------------------------ drivers/dma/ste_dma40_ll.h | 5 ++--- 3 files changed, 22 insertions(+), 33 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index c597dba713b..3c61c582303 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1679,17 +1679,13 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, desc->lli_phy.src, virt_to_phys(desc->lli_phy.src), chan->src_def_cfg, - src_info->data_width, - dst_info->data_width, - src_info->psize); + src_info, dst_info); ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, desc->lli_phy.dst, virt_to_phys(desc->lli_phy.dst), chan->dst_def_cfg, - dst_info->data_width, - src_info->data_width, - dst_info->psize); + dst_info, src_info); dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, desc->lli_pool.size, DMA_TO_DEVICE); diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 9935c6dbcfe..509a13049d7 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -125,13 +125,14 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, static int d40_phy_fill_lli(struct d40_phy_lli *lli, dma_addr_t data, u32 data_size, - int psize, dma_addr_t next_lli, u32 reg_cfg, bool term_int, - u32 data_width, - bool is_device) + bool is_device, + struct stedma40_half_channel_info *info) { + unsigned int data_width = info->data_width; + int psize = info->psize; int num_elems; if (psize == STEDMA40_PSIZE_PHY_1) @@ -198,16 +199,11 @@ static int d40_seg_size(int size, int data_width1, int data_width2) return seg_max; } -static struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, - dma_addr_t addr, - u32 size, - int psize, - dma_addr_t lli_phys, - u32 reg_cfg, - bool term_int, - u32 data_width1, - u32 data_width2, - bool is_device) +static struct d40_phy_lli * +d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, + dma_addr_t lli_phys, u32 reg_cfg, bool term_int, + bool is_device, struct stedma40_half_channel_info *info, + struct stedma40_half_channel_info *otherinfo) { int err; dma_addr_t next = lli_phys; @@ -215,7 +211,8 @@ static struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, int size_seg = 0; do { - size_seg = d40_seg_size(size_rest, data_width1, data_width2); + size_seg = d40_seg_size(size_rest, info->data_width, + otherinfo->data_width); size_rest -= size_seg; if (term_int && size_rest == 0) @@ -227,12 +224,11 @@ static struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, err = d40_phy_fill_lli(lli, addr, size_seg, - psize, next, reg_cfg, !next, - data_width1, - is_device); + is_device, + 
info); if (err) goto err; @@ -254,9 +250,8 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, struct d40_phy_lli *lli_sg, dma_addr_t lli_phys, u32 reg_cfg, - u32 data_width1, - u32 data_width2, - int psize) + struct stedma40_half_channel_info *info, + struct stedma40_half_channel_info *otherinfo) { int total_size = 0; int i; @@ -280,13 +275,12 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, lli = d40_phy_buf_to_lli(lli, dst, sg_dma_len(current_sg), - psize, l_phys, reg_cfg, sg_len - 1 == i, - data_width1, - data_width2, - target == dst); + target == dst, + info, + otherinfo); if (lli == NULL) return -EINVAL; } diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 867f23f6e09..cd94afd9c2f 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -308,9 +308,8 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, struct d40_phy_lli *lli, dma_addr_t lli_phys, u32 reg_cfg, - u32 data_width1, - u32 data_width2, - int psize); + struct stedma40_half_channel_info *info, + struct stedma40_half_channel_info *otherinfo); /* Logical channels */ -- cgit v1.2.3 From 822c567639971628ceba2c53531670d595e3164d Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:28 +0100 Subject: dma40: unify src/dst addr check Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 3c61c582303..d9dfda2c65b 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1635,11 +1635,9 @@ static u32 stedma40_residue(struct dma_chan *chan) static int d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, struct scatterlist *sg_src, struct scatterlist *sg_dst, - unsigned int sg_len, enum dma_data_direction direction, - dma_addr_t dev_addr) + unsigned int sg_len, dma_addr_t src_dev_addr, + dma_addr_t dst_dev_addr) { - dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; - dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? dev_addr : 0; struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct stedma40_half_channel_info *src_info = &cfg->src_info; struct stedma40_half_channel_info *dst_info = &cfg->dst_info; @@ -1665,11 +1663,9 @@ d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, static int d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, struct scatterlist *sg_src, struct scatterlist *sg_dst, - unsigned int sg_len, enum dma_data_direction direction, - dma_addr_t dev_addr) + unsigned int sg_len, dma_addr_t src_dev_addr, + dma_addr_t dst_dev_addr) { - dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; - dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? 
dev_addr : 0; struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct stedma40_half_channel_info *src_info = &cfg->src_info; struct stedma40_half_channel_info *dst_info = &cfg->dst_info; @@ -1757,7 +1753,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, enum dma_data_direction direction, unsigned long dma_flags) { struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); - dma_addr_t dev_addr = 0; + dma_addr_t src_dev_addr = 0; + dma_addr_t dst_dev_addr = 0; struct d40_desc *desc; unsigned long flags; int ret; @@ -1773,15 +1770,21 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, if (desc == NULL) goto err; - if (direction != DMA_NONE) - dev_addr = d40_get_dev_addr(chan, direction); + if (direction != DMA_NONE) { + dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); + + if (direction == DMA_FROM_DEVICE) + src_dev_addr = dev_addr; + else if (direction == DMA_TO_DEVICE) + dst_dev_addr = dev_addr; + } if (chan_is_logical(chan)) ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, - sg_len, direction, dev_addr); + sg_len, src_dev_addr, dst_dev_addr); else ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, - sg_len, direction, dev_addr); + sg_len, src_dev_addr, dst_dev_addr); if (ret) { chan_err(chan, "Failed to prepare %s sg job: %d\n", -- cgit v1.2.3 From 1f7622ca55b1f5875e32140b4781759f800aded3 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:29 +0100 Subject: dma40: make d40_log_buf_to_lli static Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40_ll.c | 2 +- drivers/dma/ste_dma40_ll.h | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 509a13049d7..876aad2c838 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -363,7 +363,7 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, } -struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, +static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, dma_addr_t addr, int size, u32 lcsp13, /* src or dst*/ diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index cd94afd9c2f..4626c887437 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -313,13 +313,6 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, /* Logical channels */ -struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, - dma_addr_t addr, - int size, - u32 lcsp13, /* src or dst*/ - u32 data_width1, u32 data_width2, - bool addr_inc); - int d40_log_sg_to_lli(struct scatterlist *sg, int sg_len, dma_addr_t dev_addr, -- cgit v1.2.3 From 7f933bed96e9872131014ea2bdd5b012e43fc316 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:30 +0100 Subject: dma40: use flags to reduce parameter count Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40_ll.c | 84 ++++++++++++++++++++++++++-------------------- drivers/dma/ste_dma40_ll.h | 5 +++ 2 files changed, 52 insertions(+), 37 deletions(-) diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 876aad2c838..88b9e371be2 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -127,10 +127,11 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, u32 data_size, dma_addr_t next_lli, u32 reg_cfg, - bool term_int, - bool is_device, - struct 
stedma40_half_channel_info *info) + struct stedma40_half_channel_info *info, + unsigned int flags) { + bool addr_inc = flags & LLI_ADDR_INC; + bool term_int = flags & LLI_TERM_INT; unsigned int data_width = info->data_width; int psize = info->psize; int num_elems; @@ -155,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, * Distance to next element sized entry. * Usually the size of the element unless you want gaps. */ - if (!is_device) + if (addr_inc) lli->reg_elt |= (0x1 << data_width) << D40_SREG_ELEM_PHY_EIDX_POS; @@ -201,40 +202,45 @@ static int d40_seg_size(int size, int data_width1, int data_width2) static struct d40_phy_lli * d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, - dma_addr_t lli_phys, u32 reg_cfg, bool term_int, - bool is_device, struct stedma40_half_channel_info *info, - struct stedma40_half_channel_info *otherinfo) + dma_addr_t lli_phys, u32 reg_cfg, + struct stedma40_half_channel_info *info, + struct stedma40_half_channel_info *otherinfo, + unsigned long flags) { + bool addr_inc = flags & LLI_ADDR_INC; + bool term_int = flags & LLI_TERM_INT; int err; dma_addr_t next = lli_phys; int size_rest = size; int size_seg = 0; + /* + * This piece may be split up based on d40_seg_size(); we only want the + * term int on the last part. + */ + if (term_int) + flags &= ~LLI_TERM_INT; + do { size_seg = d40_seg_size(size_rest, info->data_width, otherinfo->data_width); size_rest -= size_seg; - if (term_int && size_rest == 0) + if (term_int && size_rest == 0) { next = 0; - else + flags |= LLI_TERM_INT; + } else next = ALIGN(next + sizeof(struct d40_phy_lli), D40_LLI_ALIGN); - err = d40_phy_fill_lli(lli, - addr, - size_seg, - next, - reg_cfg, - !next, - is_device, - info); + err = d40_phy_fill_lli(lli, addr, size_seg, next, + reg_cfg, info, flags); if (err) goto err; lli++; - if (!is_device) + if (addr_inc) addr += size_seg; } while (size_rest); @@ -256,31 +262,29 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, int total_size = 0; int i; struct scatterlist *current_sg = sg; - dma_addr_t dst; struct d40_phy_lli *lli = lli_sg; dma_addr_t l_phys = lli_phys; + unsigned long flags = 0; + + if (!target) + flags |= LLI_ADDR_INC; for_each_sg(sg, current_sg, sg_len, i) { + dma_addr_t sg_addr = sg_dma_address(current_sg); + unsigned int len = sg_dma_len(current_sg); + dma_addr_t dst = target ?: sg_addr; total_size += sg_dma_len(current_sg); - if (target) - dst = target; - else - dst = sg_dma_address(current_sg); + if (i == sg_len - 1) + flags |= LLI_TERM_INT; l_phys = ALIGN(lli_phys + (lli - lli_sg) * sizeof(struct d40_phy_lli), D40_LLI_ALIGN); - lli = d40_phy_buf_to_lli(lli, - dst, - sg_dma_len(current_sg), - l_phys, - reg_cfg, - sg_len - 1 == i, - target == dst, - info, - otherinfo); + lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, + reg_cfg, info, otherinfo, flags); + if (lli == NULL) return -EINVAL; } @@ -343,8 +347,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, dma_addr_t data, u32 data_size, u32 reg_cfg, u32 data_width, - bool addr_inc) + unsigned int flags) { + bool addr_inc = flags & LLI_ADDR_INC; + lli->lcsp13 = reg_cfg; /* The number of elements to transfer */ @@ -369,8 +375,9 @@ static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, u32 lcsp13, /* src or dst*/ u32 data_width1, u32 data_width2, - bool addr_inc) + unsigned int flags) { + bool addr_inc = flags & LLI_ADDR_INC; struct d40_log_lli *lli = lli_sg; int size_rest = size; int size_seg = 0; @@ -383,7 +390,7 @@ static struct d40_log_lli 
*d40_log_buf_to_lli(struct d40_log_lli *lli_sg, addr, size_seg, lcsp13, data_width1, - addr_inc); + flags); if (addr_inc) addr += size_seg; lli++; @@ -403,7 +410,10 @@ int d40_log_sg_to_lli(struct scatterlist *sg, struct scatterlist *current_sg = sg; int i; struct d40_log_lli *lli = lli_sg; - bool autoinc = !dev_addr; + unsigned long flags = 0; + + if (!dev_addr) + flags |= LLI_ADDR_INC; for_each_sg(sg, current_sg, sg_len, i) { dma_addr_t sg_addr = sg_dma_address(current_sg); @@ -416,7 +426,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg, lcsp13, data_width1, data_width2, - autoinc); + flags); } return total_size; diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 4626c887437..59e72f0cc90 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -293,6 +293,11 @@ struct d40_def_lcsp { /* Physical channels */ +enum d40_lli_flags { + LLI_ADDR_INC = 1 << 0, + LLI_TERM_INT = 1 << 1, +}; + void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg, -- cgit v1.2.3 From e65889c75ccb5b64dfb60f32e2d9448446cabcc7 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:31 +0100 Subject: dma40: extract lcla code into separate function Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 92 +++++++++++++++++++++++++++---------------------- 1 file changed, 50 insertions(+), 42 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d9dfda2c65b..4ec96ac1e41 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -521,54 +521,62 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); } -static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) -{ - int curr_lcla = -EINVAL, next_lcla; - - if (chan_is_physical(d40c)) { - d40_phy_lli_load(d40c, d40d); - d40d->lli_current = d40d->lli_len; - } else { - - if ((d40d->lli_len - d40d->lli_current) > 1) - curr_lcla = d40_lcla_alloc_one(d40c, d40d); - - d40_log_lli_lcpa_write(d40c->lcpa, - &d40d->lli_log.dst[d40d->lli_current], - &d40d->lli_log.src[d40d->lli_current], - curr_lcla); - - d40d->lli_current++; - for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { - unsigned int lcla_offset = d40c->phy_chan->num * 1024 + - 8 * curr_lcla * 2; - struct d40_lcla_pool *pool = &d40c->base->lcla_pool; - struct d40_log_lli *lcla = pool->base + lcla_offset; - - if (d40d->lli_current + 1 < d40d->lli_len) - next_lcla = d40_lcla_alloc_one(d40c, d40d); - else - next_lcla = -EINVAL; - - d40_log_lli_lcla_write(lcla, - &d40d->lli_log.dst[d40d->lli_current], - &d40d->lli_log.src[d40d->lli_current], - next_lcla); +static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) +{ + struct d40_lcla_pool *pool = &chan->base->lcla_pool; + struct d40_log_lli_bidir *lli = &desc->lli_log; + int lli_current = desc->lli_current; + int lli_len = desc->lli_len; + int curr_lcla = -EINVAL; + + if (lli_len - lli_current > 1) + curr_lcla = d40_lcla_alloc_one(chan, desc); + + d40_log_lli_lcpa_write(chan->lcpa, + &lli->dst[lli_current], + &lli->src[lli_current], + curr_lcla); + + lli_current++; + for (; lli_current < lli_len; lli_current++) { + unsigned int lcla_offset = chan->phy_chan->num * 1024 + + 8 * curr_lcla * 2; + struct d40_log_lli *lcla = pool->base + lcla_offset; + int next_lcla; + + if (lli_current + 1 < lli_len) + next_lcla = d40_lcla_alloc_one(chan, 
desc); + else + next_lcla = -EINVAL; - dma_sync_single_range_for_device(d40c->base->dev, - pool->dma_addr, lcla_offset, - 2 * sizeof(struct d40_log_lli), - DMA_TO_DEVICE); + d40_log_lli_lcla_write(lcla, + &lli->dst[lli_current], + &lli->src[lli_current], + next_lcla); - curr_lcla = next_lcla; + dma_sync_single_range_for_device(chan->base->dev, + pool->dma_addr, lcla_offset, + 2 * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); - if (curr_lcla == -EINVAL) { - d40d->lli_current++; - break; - } + curr_lcla = next_lcla; + if (curr_lcla == -EINVAL) { + lli_current++; + break; } } + + desc->lli_current = lli_current; +} + +static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) +{ + if (chan_is_physical(d40c)) { + d40_phy_lli_load(d40c, d40d); + d40d->lli_current = d40d->lli_len; + } else + d40_log_lli_to_lcxa(d40c, d40d); } static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) -- cgit v1.2.3 From 6045f0bb2818393a44e835454db96709cb5b3d80 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:32 +0100 Subject: dma40: handle failure to allocate first LCLA Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 4ec96ac1e41..b8cce85a955 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -538,6 +538,10 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) curr_lcla); lli_current++; + + if (curr_lcla < 0) + goto out; + for (; lli_current < lli_len; lli_current++) { unsigned int lcla_offset = chan->phy_chan->num * 1024 + 8 * curr_lcla * 2; @@ -567,6 +571,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) } } +out: desc->lli_current = lli_current; } -- cgit v1.2.3 From 7ad74a7cf6f6355fd3f4c15afe63460fc4ec3f57 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:33 +0100 Subject: dma40: fix DMA_SG capability and channels The DMA_SG cap is enabled on the wrong channel, and the pointers are repeatedly set incorrectly. Fix it and combine the ops settings to a common function. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 71 +++++++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 37 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index b8cce85a955..929fd8f45a2 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2238,6 +2238,32 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, } } +static void d40_ops_init(struct d40_base *base, struct dma_device *dev) +{ + if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) + dev->device_prep_slave_sg = d40_prep_slave_sg; + + if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { + dev->device_prep_dma_memcpy = d40_prep_memcpy; + + /* + * This controller can only access address at even + * 32bit boundaries, i.e. 
2^2 + */ + dev->copy_align = 2; + } + + if (dma_has_cap(DMA_SG, dev->cap_mask)) + dev->device_prep_dma_sg = d40_prep_memcpy_sg; + + dev->device_alloc_chan_resources = d40_alloc_chan_resources; + dev->device_free_chan_resources = d40_free_chan_resources; + dev->device_issue_pending = d40_issue_pending; + dev->device_tx_status = d40_tx_status; + dev->device_control = d40_control; + dev->dev = base->dev; +} + static int __init d40_dmaengine_init(struct d40_base *base, int num_reserved_chans) { @@ -2249,15 +2275,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_zero(base->dma_slave.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); - base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; - base->dma_slave.device_free_chan_resources = d40_free_chan_resources; - base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; - base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; - base->dma_slave.device_tx_status = d40_tx_status; - base->dma_slave.device_issue_pending = d40_issue_pending; - base->dma_slave.device_control = d40_control; - base->dma_slave.dev = base->dev; + d40_ops_init(base, &base->dma_slave); err = dma_async_device_register(&base->dma_slave); @@ -2271,22 +2289,9 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_zero(base->dma_memcpy.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); - dma_cap_set(DMA_SG, base->dma_slave.cap_mask); - - base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; - base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; - base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; - base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; - base->dma_memcpy.device_tx_status = d40_tx_status; - base->dma_memcpy.device_issue_pending = d40_issue_pending; - base->dma_memcpy.device_control = d40_control; - base->dma_memcpy.dev = base->dev; - /* - * This controller can only access address at even - * 32bit boundaries, i.e. 
2^2 - */ - base->dma_memcpy.copy_align = 2; + dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); + + d40_ops_init(base, &base->dma_memcpy); err = dma_async_device_register(&base->dma_memcpy); @@ -2302,18 +2307,10 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_zero(base->dma_both.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); - dma_cap_set(DMA_SG, base->dma_slave.cap_mask); - - base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; - base->dma_both.device_free_chan_resources = d40_free_chan_resources; - base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; - base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; - base->dma_both.device_tx_status = d40_tx_status; - base->dma_both.device_issue_pending = d40_issue_pending; - base->dma_both.device_control = d40_control; - base->dma_both.dev = base->dev; - base->dma_both.copy_align = 2; + dma_cap_set(DMA_SG, base->dma_both.cap_mask); + + d40_ops_init(base, &base->dma_both); + err = dma_async_device_register(&base->dma_both); if (err) { -- cgit v1.2.3 From 86eb5fb61125e4646c9447a1f2ce130817dab34e Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:34 +0100 Subject: dma40: stop ongoing transfers in DMA_TERMINATE_ALL The current implementation of DMA_TERMINATE_ALL leaves ongoing transfers running. Fix it. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 929fd8f45a2..8fd0bb94e77 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -918,10 +918,8 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) return is_link; } -static int d40_pause(struct dma_chan *chan) +static int d40_pause(struct d40_chan *d40c) { - struct d40_chan *d40c = - container_of(chan, struct d40_chan, chan); int res = 0; unsigned long flags; @@ -945,10 +943,8 @@ static int d40_pause(struct dma_chan *chan) return res; } -static int d40_resume(struct dma_chan *chan) +static int d40_resume(struct d40_chan *d40c) { - struct d40_chan *d40c = - container_of(chan, struct d40_chan, chan); int res = 0; unsigned long flags; @@ -978,6 +974,22 @@ no_suspend: return res; } +static int d40_terminate_all(struct d40_chan *chan) +{ + unsigned long flags; + int ret = 0; + + ret = d40_pause(chan); + if (!ret && chan_is_physical(chan)) + ret = d40_channel_execute_command(chan, D40_DMA_STOP); + + spin_lock_irqsave(&chan->lock, flags); + d40_term_all(chan); + spin_unlock_irqrestore(&chan->lock, flags); + + return ret; +} + static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) { struct d40_chan *d40c = container_of(tx->chan, @@ -2176,7 +2188,6 @@ static void d40_set_runtime_config(struct dma_chan *chan, static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { - unsigned long flags; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); if (d40c->phy_chan == NULL) { @@ -2186,14 +2197,11 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, switch (cmd) { case DMA_TERMINATE_ALL: - spin_lock_irqsave(&d40c->lock, flags); - d40_term_all(d40c); - spin_unlock_irqrestore(&d40c->lock, flags); - return 0; + return d40_terminate_all(d40c); case DMA_PAUSE: - return 
d40_pause(chan); + return d40_pause(d40c); case DMA_RESUME: - return d40_resume(chan); + return d40_resume(d40c); case DMA_SLAVE_CONFIG: d40_set_runtime_config(chan, (struct dma_slave_config *) arg); -- cgit v1.2.3 From 0c842b551063c5f7382ac9b457992f3b34972801 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:35 +0100 Subject: dma40: cyclic xfer support Support cyclic transfers, which are useful for ALSA drivers. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 172 ++++++++++++++++++++++++++++++++++++--------- drivers/dma/ste_dma40_ll.c | 35 +++++---- drivers/dma/ste_dma40_ll.h | 9 ++- 3 files changed, 167 insertions(+), 49 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8fd0bb94e77..af955de035f 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -115,6 +115,7 @@ struct d40_desc { struct list_head node; bool is_in_client_list; + bool cyclic; }; /** @@ -527,17 +528,45 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) struct d40_log_lli_bidir *lli = &desc->lli_log; int lli_current = desc->lli_current; int lli_len = desc->lli_len; + bool cyclic = desc->cyclic; int curr_lcla = -EINVAL; + int first_lcla = 0; + bool linkback; - if (lli_len - lli_current > 1) + /* + * We may have partially running cyclic transfers, in case we did't get + * enough LCLA entries. + */ + linkback = cyclic && lli_current == 0; + + /* + * For linkback, we need one LCLA even with only one link, because we + * can't link back to the one in LCPA space + */ + if (linkback || (lli_len - lli_current > 1)) { curr_lcla = d40_lcla_alloc_one(chan, desc); + first_lcla = curr_lcla; + } + + /* + * For linkback, we normally load the LCPA in the loop since we need to + * link it to the second LCLA and not the first. However, if we + * couldn't even get a first LCLA, then we have to run in LCPA and + * reload manually. + */ + if (!linkback || curr_lcla == -EINVAL) { + unsigned int flags = 0; - d40_log_lli_lcpa_write(chan->lcpa, - &lli->dst[lli_current], - &lli->src[lli_current], - curr_lcla); + if (curr_lcla == -EINVAL) + flags |= LLI_TERM_INT; - lli_current++; + d40_log_lli_lcpa_write(chan->lcpa, + &lli->dst[lli_current], + &lli->src[lli_current], + curr_lcla, + flags); + lli_current++; + } if (curr_lcla < 0) goto out; @@ -546,17 +575,33 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) unsigned int lcla_offset = chan->phy_chan->num * 1024 + 8 * curr_lcla * 2; struct d40_log_lli *lcla = pool->base + lcla_offset; + unsigned int flags = 0; int next_lcla; if (lli_current + 1 < lli_len) next_lcla = d40_lcla_alloc_one(chan, desc); else - next_lcla = -EINVAL; + next_lcla = linkback ? first_lcla : -EINVAL; + + if (cyclic || next_lcla == -EINVAL) + flags |= LLI_TERM_INT; + if (linkback && curr_lcla == first_lcla) { + /* First link goes in both LCPA and LCLA */ + d40_log_lli_lcpa_write(chan->lcpa, + &lli->dst[lli_current], + &lli->src[lli_current], + next_lcla, flags); + } + + /* + * One unused LCLA in the cyclic case if the very first + * next_lcla fails... 
+ */ d40_log_lli_lcla_write(lcla, &lli->dst[lli_current], &lli->src[lli_current], - next_lcla); + next_lcla, flags); dma_sync_single_range_for_device(chan->base->dev, pool->dma_addr, lcla_offset, @@ -565,7 +610,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) curr_lcla = next_lcla; - if (curr_lcla == -EINVAL) { + if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { lli_current++; break; } @@ -1074,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c) if (d40d == NULL) return; - d40_lcla_free_all(d40c, d40d); + if (d40d->cyclic) { + /* + * If this was a paritially loaded list, we need to reloaded + * it, and only when the list is completed. We need to check + * for done because the interrupt will hit for every link, and + * not just the last one. + */ + if (d40d->lli_current < d40d->lli_len + && !d40_tx_is_linked(d40c) + && !d40_residue(d40c)) { + d40_lcla_free_all(d40c, d40d); + d40_desc_load(d40c, d40d); + (void) d40_start(d40c); - if (d40d->lli_current < d40d->lli_len) { - d40_desc_load(d40c, d40d); - /* Start dma job */ - (void) d40_start(d40c); - return; - } + if (d40d->lli_current == d40d->lli_len) + d40d->lli_current = 0; + } + } else { + d40_lcla_free_all(d40c, d40d); - if (d40_queue_start(d40c) == NULL) - d40c->busy = false; + if (d40d->lli_current < d40d->lli_len) { + d40_desc_load(d40c, d40d); + /* Start dma job */ + (void) d40_start(d40c); + return; + } + + if (d40_queue_start(d40c) == NULL) + d40c->busy = false; + } d40c->pending_tx++; tasklet_schedule(&d40c->tasklet); @@ -1103,11 +1167,11 @@ static void dma_tasklet(unsigned long data) /* Get first active entry from list */ d40d = d40_first_active_get(d40c); - if (d40d == NULL) goto err; - d40c->completed = d40d->txd.cookie; + if (!d40d->cyclic) + d40c->completed = d40d->txd.cookie; /* * If terminating a channel pending_tx is set to zero. 
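From the client side, the cyclic support added by this patch is reached through the generic dmaengine hook rather than any dma40-specific call. A minimal sketch of a consumer (an ALSA-style capture driver; the function and callback names are hypothetical, and the prototype is the 2.6.38-era device_prep_dma_cyclic added later in this patch):

#include <linux/dmaengine.h>

static int start_cyclic_capture(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback period_elapsed,
				void *arg)
{
	struct dma_async_tx_descriptor *desc;

	/* buf_len / period_len links, the last one wrapping back to the first */
	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len,
						    DMA_FROM_DEVICE);
	if (!desc)
		return -EBUSY;

	/* invoked once per completed period for as long as the job runs */
	desc->callback = period_elapsed;
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

The job then runs until the client issues DMA_TERMINATE_ALL, which is why the preceding patch made that command stop transfers already in flight.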
@@ -1122,16 +1186,18 @@ static void dma_tasklet(unsigned long data) callback = d40d->txd.callback; callback_param = d40d->txd.callback_param; - if (async_tx_test_ack(&d40d->txd)) { - d40_pool_lli_free(d40c, d40d); - d40_desc_remove(d40d); - d40_desc_free(d40c, d40d); - } else { - if (!d40d->is_in_client_list) { + if (!d40d->cyclic) { + if (async_tx_test_ack(&d40d->txd)) { + d40_pool_lli_free(d40c, d40d); d40_desc_remove(d40d); - d40_lcla_free_all(d40c, d40d); - list_add_tail(&d40d->node, &d40c->client); - d40d->is_in_client_list = true; + d40_desc_free(d40c, d40d); + } else { + if (!d40d->is_in_client_list) { + d40_desc_remove(d40d); + d40_lcla_free_all(d40c, d40d); + list_add_tail(&d40d->node, &d40c->client); + d40d->is_in_client_list = true; + } } } @@ -1694,19 +1760,23 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct stedma40_half_channel_info *src_info = &cfg->src_info; struct stedma40_half_channel_info *dst_info = &cfg->dst_info; + unsigned long flags = 0; int ret; + if (desc->cyclic) + flags |= LLI_CYCLIC | LLI_TERM_INT; + ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, desc->lli_phy.src, virt_to_phys(desc->lli_phy.src), chan->src_def_cfg, - src_info, dst_info); + src_info, dst_info, flags); ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, desc->lli_phy.dst, virt_to_phys(desc->lli_phy.dst), chan->dst_def_cfg, - dst_info, src_info); + dst_info, src_info, flags); dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, desc->lli_pool.size, DMA_TO_DEVICE); @@ -1789,12 +1859,16 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, return NULL; } + spin_lock_irqsave(&chan->lock, flags); desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); if (desc == NULL) goto err; + if (sg_next(&sg_src[sg_len - 1]) == sg_src) + desc->cyclic = true; + if (direction != DMA_NONE) { dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); @@ -2007,6 +2081,36 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); } +static struct dma_async_tx_descriptor * +dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, + size_t buf_len, size_t period_len, + enum dma_data_direction direction) +{ + unsigned int periods = buf_len / period_len; + struct dma_async_tx_descriptor *txd; + struct scatterlist *sg; + int i; + + sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); + for (i = 0; i < periods; i++) { + sg_dma_address(&sg[i]) = dma_addr; + sg_dma_len(&sg[i]) = period_len; + dma_addr += period_len; + } + + sg[periods].offset = 0; + sg[periods].length = 0; + sg[periods].page_link = + ((unsigned long)sg | 0x01) & ~0x02; + + txd = d40_prep_sg(chan, sg, sg, periods, direction, + DMA_PREP_INTERRUPT); + + kfree(sg); + + return txd; +} + static enum dma_status d40_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) @@ -2264,6 +2368,9 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev) if (dma_has_cap(DMA_SG, dev->cap_mask)) dev->device_prep_dma_sg = d40_prep_memcpy_sg; + if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) + dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; + dev->device_alloc_chan_resources = d40_alloc_chan_resources; dev->device_free_chan_resources = d40_free_chan_resources; dev->device_issue_pending = d40_issue_pending; @@ -2282,6 +2389,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, 
dma_cap_zero(base->dma_slave.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); + dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); d40_ops_init(base, &base->dma_slave); @@ -2316,9 +2424,9 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); dma_cap_set(DMA_SG, base->dma_both.cap_mask); + dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); d40_ops_init(base, &base->dma_both); - err = dma_async_device_register(&base->dma_both); if (err) { diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 88b9e371be2..cad9e1daedf 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -202,13 +202,15 @@ static int d40_seg_size(int size, int data_width1, int data_width2) static struct d40_phy_lli * d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, - dma_addr_t lli_phys, u32 reg_cfg, + dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg, struct stedma40_half_channel_info *info, struct stedma40_half_channel_info *otherinfo, unsigned long flags) { + bool lastlink = flags & LLI_LAST_LINK; bool addr_inc = flags & LLI_ADDR_INC; bool term_int = flags & LLI_TERM_INT; + bool cyclic = flags & LLI_CYCLIC; int err; dma_addr_t next = lli_phys; int size_rest = size; @@ -226,10 +228,12 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, otherinfo->data_width); size_rest -= size_seg; - if (term_int && size_rest == 0) { - next = 0; + if (size_rest == 0 && term_int) flags |= LLI_TERM_INT; - } else + + if (size_rest == 0 && lastlink) + next = cyclic ? first_phys : 0; + else next = ALIGN(next + sizeof(struct d40_phy_lli), D40_LLI_ALIGN); @@ -257,14 +261,14 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, dma_addr_t lli_phys, u32 reg_cfg, struct stedma40_half_channel_info *info, - struct stedma40_half_channel_info *otherinfo) + struct stedma40_half_channel_info *otherinfo, + unsigned long flags) { int total_size = 0; int i; struct scatterlist *current_sg = sg; struct d40_phy_lli *lli = lli_sg; dma_addr_t l_phys = lli_phys; - unsigned long flags = 0; if (!target) flags |= LLI_ADDR_INC; @@ -277,12 +281,12 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, total_size += sg_dma_len(current_sg); if (i == sg_len - 1) - flags |= LLI_TERM_INT; + flags |= LLI_TERM_INT | LLI_LAST_LINK; l_phys = ALIGN(lli_phys + (lli - lli_sg) * sizeof(struct d40_phy_lli), D40_LLI_ALIGN); - lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, + lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys, reg_cfg, info, otherinfo, flags); if (lli == NULL) @@ -297,15 +301,18 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, static void d40_log_lli_link(struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next) + int next, unsigned int flags) { + bool interrupt = flags & LLI_TERM_INT; u32 slos = 0; u32 dlos = 0; if (next != -EINVAL) { slos = next * 2; dlos = next * 2 + 1; - } else { + } + + if (interrupt) { lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; } @@ -320,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst, void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next) + int next, unsigned int flags) { - d40_log_lli_link(lli_dst, lli_src, next); + d40_log_lli_link(lli_dst, lli_src, next, flags); writel(lli_src->lcsp02, &lcpa[0].lcsp0); writel(lli_src->lcsp13, &lcpa[0].lcsp1); @@ -333,9 +340,9 @@ void 
d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, void d40_log_lli_lcla_write(struct d40_log_lli *lcla, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next) + int next, unsigned int flags) { - d40_log_lli_link(lli_dst, lli_src, next); + d40_log_lli_link(lli_dst, lli_src, next, flags); writel(lli_src->lcsp02, &lcla[0].lcsp02); writel(lli_src->lcsp13, &lcla[0].lcsp13); diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 59e72f0cc90..195ee65ee7f 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -296,6 +296,8 @@ struct d40_def_lcsp { enum d40_lli_flags { LLI_ADDR_INC = 1 << 0, LLI_TERM_INT = 1 << 1, + LLI_CYCLIC = 1 << 2, + LLI_LAST_LINK = 1 << 3, }; void d40_phy_cfg(struct stedma40_chan_cfg *cfg, @@ -314,7 +316,8 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, dma_addr_t lli_phys, u32 reg_cfg, struct stedma40_half_channel_info *info, - struct stedma40_half_channel_info *otherinfo); + struct stedma40_half_channel_info *otherinfo, + unsigned long flags); /* Logical channels */ @@ -328,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg, void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next); + int next, unsigned int flags); void d40_log_lli_lcla_write(struct d40_log_lli *lcla, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next); + int next, unsigned int flags); #endif /* STE_DMA40_LLI_H */ -- cgit v1.2.3 From c5a9f9d0895b2c16908979244d3d678fd6db0545 Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Fri, 18 Feb 2011 10:01:20 +0530 Subject: pch_dma: fix kernel error issue fix the following kernel error ------------[ cut here ]------------ WARNING: at kernel/softirq.c:159 _local_bh_enable_ip.clone.5+0x35/0x71() Hardware name: To be filled by O.E.M. Modules linked in: pch_uart pch_dma fuse mga drm cpufreq_ondemand acpi_cpufreq mperf ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables ipv6 uinput snd_hda_codec_realtek snd_hda_intel snd_hda_codec matroxfb_base snd_hwdep 8250_pnp snd_seq snd_seq_device matroxfb_DAC1064 snd_pcm joydev 8250 matroxfb_accel snd_timer matroxfb_Ti3026 ppdev pegasus parport_pc snd parport matroxfb_g450 g450_pll serial_core video output matroxfb_misc soundcore snd_page_alloc serio_raw pcspkr ext4 jbd2 crc16 sdhci_pci sdhci mmc_core floppy [last unloaded: scsi_wait_scan] Pid: 0, comm: swapper Not tainted 2.6.37.upstream_check+ #8 Call Trace: [] warn_slowpath_common+0x65/0x7a [] ? _local_bh_enable_ip.clone.5+0x35/0x71 [] warn_slowpath_null+0xf/0x13 [] _local_bh_enable_ip.clone.5+0x35/0x71 [] local_bh_enable_ip+0x8/0xa [] _raw_spin_unlock_bh+0x10/0x12 [] pd_prep_slave_sg+0xba/0x200 [pch_dma] [] pch_uart_interrupt+0x44d/0x6aa [pch_uart] [] handle_IRQ_event+0x1d/0x9e [] handle_fasteoi_irq+0x90/0xc7 [] ? handle_fasteoi_irq+0x0/0xc7 [] ? do_IRQ+0x3e/0x89 [] ? common_interrupt+0x29/0x30 [] ? sys_getpriority+0x12d/0x1a2 [] ? arch_local_irq_enable+0x5/0xb [] ? acpi_idle_enter_bm+0x22a/0x261 [] ? cpuidle_idle_call+0x70/0xa1 [] ? cpu_idle+0x49/0x6a [] ? rest_init+0x58/0x5a [] ? start_kernel+0x2d0/0x2d5 [] ? 
i386_start_kernel+0xce/0xd5 Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 1c38418ae61..bf2ddd601dc 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); dma_cookie_t cookie; - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); cookie = pdc_assign_cookie(pd_chan, desc); if (list_empty(&pd_chan->active_list)) { @@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) list_add_tail(&desc->desc_node, &pd_chan->queue); } - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); return 0; } @@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) struct pch_dma *pd = to_pd(chan->device); dma_addr_t addr; - desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr); + desc = pci_pool_alloc(pd->pool, flags, &addr); if (desc) { memset(desc, 0, sizeof(struct pch_dma_desc)); INIT_LIST_HEAD(&desc->tx_list); @@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) struct pch_dma_desc *ret = NULL; int i; - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { i++; if (async_tx_test_ack(&desc->txd)) { @@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) } dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); } - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); if (!ret) { ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); if (ret) { - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); pd_chan->descs_allocated++; - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); } else { dev_err(chan2dev(&pd_chan->chan), "failed to alloc desc\n"); @@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc) { if (desc) { - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); list_splice_init(&desc->tx_list, &pd_chan->free_list); list_add(&desc->desc_node, &pd_chan->free_list); - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); } } @@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan) struct pch_dma_chan *pd_chan = to_pd_chan(chan); if (pdc_is_idle(pd_chan)) { - spin_lock_bh(&pd_chan->lock); + spin_lock(&pd_chan->lock); pdc_advance_work(pd_chan); - spin_unlock_bh(&pd_chan->lock); + spin_unlock(&pd_chan->lock); } } @@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, goto err_desc_get; } - if (!first) { first = desc; } else { @@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock_bh(&pd_chan->lock); - return 0; } static void pdc_tasklet(unsigned long data) { struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; + unsigned long flags; if (!pdc_is_idle(pd_chan)) { dev_err(chan2dev(&pd_chan->chan), @@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data) return; } - spin_lock_bh(&pd_chan->lock); + spin_lock_irqsave(&pd_chan->lock, flags); if (test_and_clear_bit(0, &pd_chan->err_status)) pdc_handle_error(pd_chan); else pdc_advance_work(pd_chan); - 
spin_unlock_bh(&pd_chan->lock); + spin_unlock_irqrestore(&pd_chan->lock, flags); } static irqreturn_t pd_irq(int irq, void *devid) -- cgit v1.2.3 From 26d890f0d09fd58f7194aad651e86283cb9e6574 Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Fri, 18 Feb 2011 10:01:21 +0530 Subject: pch_dma: set the number of array correctly set the number of array correctly. Signed-off-by: Tomoya MORINAGA Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index bf2ddd601dc..b1dfba7e37b 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -82,7 +82,7 @@ struct pch_dma_regs { u32 dma_sts1; u32 reserved2; u32 reserved3; - struct pch_dma_desc_regs desc[0]; + struct pch_dma_desc_regs desc[MAX_CHAN_NR]; }; struct pch_dma_desc { @@ -124,7 +124,7 @@ struct pch_dma { struct pci_pool *pool; struct pch_dma_regs regs; struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; - struct pch_dma_chan channels[0]; + struct pch_dma_chan channels[MAX_CHAN_NR]; }; #define PCH_DMA_CTL0 0x00 -- cgit v1.2.3 From a580b8c5429a624d120cd603e1498bf676e2b4da Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Sun, 27 Feb 2011 00:47:42 +0800 Subject: dmaengine: mxs-dma: add dma support for i.MX23/28 This patch adds dma support for Freescale MXS-based SoC i.MX23/28, including apbh-dma and apbx-dma. * apbh-dma and apbx-dma are supported in the driver as two mxs-dma instances. * apbh-dma is different between mx23 and mx28, hardware version register is used to differentiate. * mxs-dma supports pio function besides data transfer. The driver uses dma_data_direction DMA_NONE to identify the pio mode, and steals sgl and sg_len to get pio words and numbers from clients. * mxs dmaengine has some very specific features, like sense function and the special NAND support (nand_lock, nand_wait4ready). These are too specific to implemented in generic dmaengine driver. * The driver refers to imx-sdma and only a single descriptor is statically assigned to each channel. Signed-off-by: Shawn Guo Signed-off-by: Vinod Koul --- arch/arm/mach-mxs/include/mach/dma.h | 26 ++ drivers/dma/Kconfig | 8 + drivers/dma/Makefile | 1 + drivers/dma/mxs-dma.c | 724 +++++++++++++++++++++++++++++++++++ 4 files changed, 759 insertions(+) create mode 100644 arch/arm/mach-mxs/include/mach/dma.h create mode 100644 drivers/dma/mxs-dma.c diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/arch/arm/mach-mxs/include/mach/dma.h new file mode 100644 index 00000000000..7f4aeeaba8d --- /dev/null +++ b/arch/arm/mach-mxs/include/mach/dma.h @@ -0,0 +1,26 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MACH_MXS_DMA_H__ +#define __MACH_MXS_DMA_H__ + +struct mxs_dma_data { + int chan_irq; +}; + +static inline int mxs_dma_is_apbh(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh"); +} + +static inline int mxs_dma_is_apbx(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx"); +} + +#endif /* __MACH_MXS_DMA_H__ */ diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1c28816152f..76f6472ba31 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -227,6 +227,14 @@ config IMX_DMA Support the i.MX DMA engine. 
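The pch_dma WARNING above is the generic softirq sanity check firing: pd_prep_slave_sg() is reached from the pch_uart interrupt handler, and spin_unlock_bh() tries to re-enable softirqs, which is not allowed in hard-IRQ context. A sketch of the locking rule the fix follows (illustrative helpers, not part of either driver):

#include <linux/spinlock.h>

static void touched_from_hardirq_paths(spinlock_t *lock)
{
	/*
	 * Prep/submit callbacks that may run inside a device interrupt
	 * handler take the plain lock; a _bh unlock here would trip the
	 * WARN_ON(in_irq()) check in local_bh_enable_ip().
	 */
	spin_lock(lock);
	/* ... manipulate descriptor lists ... */
	spin_unlock(lock);
}

static void touched_from_tasklet(spinlock_t *lock)
{
	unsigned long flags;

	/*
	 * The tasklet shares the lock with those hard-IRQ callers, so it
	 * must disable interrupts around its critical section.
	 */
	spin_lock_irqsave(lock, flags);
	/* ... advance work or handle errors ... */
	spin_unlock_irqrestore(lock, flags);
}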
This engine is integrated into Freescale i.MX1/21/27 chips. +config MXS_DMA + bool "MXS DMA support" + depends on SOC_IMX23 || SOC_IMX28 + select DMA_ENGINE + help + Support the MXS DMA engine. This engine including APBH-DMA + and APBX-DMA is integrated into Freescale i.MX23/28 chips. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 64b21f5cd74..802b55795f8 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o +obj-$(CONFIG_MXS_DMA) += mxs-dma.o obj-$(CONFIG_TIMB_DMA) += timb_dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_PL330_DMA) += pl330.o diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c new file mode 100644 index 00000000000..88aad4f5400 --- /dev/null +++ b/drivers/dma/mxs-dma.c @@ -0,0 +1,724 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * Refer to drivers/dma/imx-sdma.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * NOTE: The term "PIO" throughout the mxs-dma implementation means + * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, + * dma can program the controller registers of peripheral devices. + */ + +#define MXS_DMA_APBH 0 +#define MXS_DMA_APBX 1 +#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH) + +#define APBH_VERSION_LATEST 3 +#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST) + +#define HW_APBHX_CTRL0 0x000 +#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) +#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) +#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 +#define BP_APBH_CTRL0_RESET_CHANNEL 16 +#define HW_APBHX_CTRL1 0x010 +#define HW_APBHX_CTRL2 0x020 +#define HW_APBHX_CHANNEL_CTRL 0x030 +#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 +#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800) +#define HW_APBX_VERSION 0x800 +#define BP_APBHX_VERSION_MAJOR 24 +#define HW_APBHX_CHn_NXTCMDAR(n) \ + (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70) +#define HW_APBHX_CHn_SEMA(n) \ + (((dma_is_apbh() && apbh_is_old()) ? 
0x080 : 0x140) + (n) * 0x70) + +/* + * ccw bits definitions + * + * COMMAND: 0..1 (2) + * CHAIN: 2 (1) + * IRQ: 3 (1) + * NAND_LOCK: 4 (1) - not implemented + * NAND_WAIT4READY: 5 (1) - not implemented + * DEC_SEM: 6 (1) + * WAIT4END: 7 (1) + * HALT_ON_TERMINATE: 8 (1) + * TERMINATE_FLUSH: 9 (1) + * RESERVED: 10..11 (2) + * PIO_NUM: 12..15 (4) + */ +#define BP_CCW_COMMAND 0 +#define BM_CCW_COMMAND (3 << 0) +#define CCW_CHAIN (1 << 2) +#define CCW_IRQ (1 << 3) +#define CCW_DEC_SEM (1 << 6) +#define CCW_WAIT4END (1 << 7) +#define CCW_HALT_ON_TERM (1 << 8) +#define CCW_TERM_FLUSH (1 << 9) +#define BP_CCW_PIO_NUM 12 +#define BM_CCW_PIO_NUM (0xf << 12) + +#define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field) + +#define MXS_DMA_CMD_NO_XFER 0 +#define MXS_DMA_CMD_WRITE 1 +#define MXS_DMA_CMD_READ 2 +#define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */ + +struct mxs_dma_ccw { + u32 next; + u16 bits; + u16 xfer_bytes; +#define MAX_XFER_BYTES 0xff00 + u32 bufaddr; +#define MXS_PIO_WORDS 16 + u32 pio_words[MXS_PIO_WORDS]; +}; + +#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) + +struct mxs_dma_chan { + struct mxs_dma_engine *mxs_dma; + struct dma_chan chan; + struct dma_async_tx_descriptor desc; + struct tasklet_struct tasklet; + int chan_irq; + struct mxs_dma_ccw *ccw; + dma_addr_t ccw_phys; + dma_cookie_t last_completed; + enum dma_status status; + unsigned int flags; +#define MXS_DMA_SG_LOOP (1 << 0) +}; + +#define MXS_DMA_CHANNELS 16 +#define MXS_DMA_CHANNELS_MASK 0xffff + +struct mxs_dma_engine { + int dev_id; + unsigned int version; + void __iomem *base; + struct clk *clk; + struct dma_device dma_device; + struct device_dma_parameters dma_parms; + struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; +}; + +static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + if (dma_is_apbh() && apbh_is_old()) + writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + else + writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); +} + +static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + /* set cmd_addr up */ + writel(mxs_chan->ccw_phys, + mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); + + /* enable apbh channel clock */ + if (dma_is_apbh()) { + if (apbh_is_old()) + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); + } + + /* write 1 to SEMA to kick off the channel */ + writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); +} + +static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + /* disable apbh channel clock */ + if (dma_is_apbh()) { + if (apbh_is_old()) + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + } + + mxs_chan->status = DMA_SUCCESS; +} + +static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + /* freeze the channel */ + if (dma_is_apbh() && apbh_is_old()) + 
writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); + + mxs_chan->status = DMA_PAUSED; +} + +static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + + /* unfreeze the channel */ + if (dma_is_apbh() && apbh_is_old()) + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR); + + mxs_chan->status = DMA_IN_PROGRESS; +} + +static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan) +{ + dma_cookie_t cookie = mxs_chan->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + mxs_chan->chan.cookie = cookie; + mxs_chan->desc.cookie = cookie; + + return cookie; +} + +static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct mxs_dma_chan, chan); +} + +static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan); + + mxs_dma_enable_chan(mxs_chan); + + return mxs_dma_assign_cookie(mxs_chan); +} + +static void mxs_dma_tasklet(unsigned long data) +{ + struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data; + + if (mxs_chan->desc.callback) + mxs_chan->desc.callback(mxs_chan->desc.callback_param); +} + +static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) +{ + struct mxs_dma_engine *mxs_dma = dev_id; + u32 stat1, stat2; + + /* completion status */ + stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); + stat1 &= MXS_DMA_CHANNELS_MASK; + writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR); + + /* error status */ + stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); + writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR); + + /* + * When both completion and error of termination bits set at the + * same time, we do not take it as an error. IOW, it only becomes + * an error we need to handler here in case of ether it's (1) an bus + * error or (2) a termination error with no completion. 
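More generally, the writel() calls in this driver rely on the MXS "set/clear" register idiom: each control register is also decoded at small offset aliases that set or clear individual bits without a read-modify-write cycle, which is why the single-bit channel operations above are plain writes. A sketch of the idiom (offsets as used by the mach-mxs headers; the helper names are made up):

#include <linux/io.h>

#define MXS_SET_ADDR	0x4	/* write here to OR bits into the register */
#define MXS_CLR_ADDR	0x8	/* write here to AND bits out of it        */

static inline void mxs_set_bits(void __iomem *reg, u32 bits)
{
	writel(bits, reg + MXS_SET_ADDR);
}

static inline void mxs_clr_bits(void __iomem *reg, u32 bits)
{
	writel(bits, reg + MXS_CLR_ADDR);
}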
+ */ + stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ + (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ + + /* combine error and completion status for checking */ + stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; + while (stat1) { + int channel = fls(stat1) - 1; + struct mxs_dma_chan *mxs_chan = + &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; + + if (channel >= MXS_DMA_CHANNELS) { + dev_dbg(mxs_dma->dma_device.dev, + "%s: error in channel %d\n", __func__, + channel - MXS_DMA_CHANNELS); + mxs_chan->status = DMA_ERROR; + mxs_dma_reset_chan(mxs_chan); + } else { + if (mxs_chan->flags & MXS_DMA_SG_LOOP) + mxs_chan->status = DMA_IN_PROGRESS; + else + mxs_chan->status = DMA_SUCCESS; + } + + stat1 &= ~(1 << channel); + + if (mxs_chan->status == DMA_SUCCESS) + mxs_chan->last_completed = mxs_chan->desc.cookie; + + /* schedule tasklet on this channel */ + tasklet_schedule(&mxs_chan->tasklet); + } + + return IRQ_HANDLED; +} + +static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_data *data = chan->private; + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int ret; + + if (!data) + return -EINVAL; + + mxs_chan->chan_irq = data->chan_irq; + + mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, + &mxs_chan->ccw_phys, GFP_KERNEL); + if (!mxs_chan->ccw) { + ret = -ENOMEM; + goto err_alloc; + } + + memset(mxs_chan->ccw, 0, PAGE_SIZE); + + ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, + 0, "mxs-dma", mxs_dma); + if (ret) + goto err_irq; + + ret = clk_enable(mxs_dma->clk); + if (ret) + goto err_clk; + + mxs_dma_reset_chan(mxs_chan); + + dma_async_tx_descriptor_init(&mxs_chan->desc, chan); + mxs_chan->desc.tx_submit = mxs_dma_tx_submit; + + /* the descriptor is ready */ + async_tx_ack(&mxs_chan->desc); + + return 0; + +err_clk: + free_irq(mxs_chan->chan_irq, mxs_dma); +err_irq: + dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, + mxs_chan->ccw, mxs_chan->ccw_phys); +err_alloc: + return ret; +} + +static void mxs_dma_free_chan_resources(struct dma_chan *chan) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + + mxs_dma_disable_chan(mxs_chan); + + free_irq(mxs_chan->chan_irq, mxs_dma); + + dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, + mxs_chan->ccw, mxs_chan->ccw_phys); + + clk_disable(mxs_dma->clk); +} + +static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long append) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + struct mxs_dma_ccw *ccw; + struct scatterlist *sg; + int i, j; + u32 *pio; + static int idx; + + if (mxs_chan->status == DMA_IN_PROGRESS && !append) + return NULL; + + if (sg_len + (append ? idx : 0) > NUM_CCW) { + dev_err(mxs_dma->dma_device.dev, + "maximum number of sg exceeded: %d > %d\n", + sg_len, NUM_CCW); + goto err_out; + } + + mxs_chan->status = DMA_IN_PROGRESS; + mxs_chan->flags = 0; + + /* + * If the sg is prepared with append flag set, the sg + * will be appended to the last prepared sg. 
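From the client side, both conventions of this prep hook -- the append flag just described and the DMA_NONE "PIO words" mode mentioned in the changelog -- are used through the ordinary slave-sg entry point. A hypothetical wrapper (not part of the patch) that programs peripheral registers via the DMA engine:

#include <linux/dmaengine.h>

/*
 * For DMA_NONE the scatterlist pointer and length are "stolen": they
 * carry the raw PIO words and their count instead of a real sg list.
 * A non-zero flags argument asks for the new chain to be appended to
 * the previously prepared one.
 */
static struct dma_async_tx_descriptor *
mxs_prep_pio(struct dma_chan *chan, u32 *pio, unsigned int nwords,
	     bool append)
{
	return chan->device->device_prep_slave_sg(chan,
			(struct scatterlist *)pio, nwords,
			DMA_NONE, append ? 1 : 0);
}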
+ */ + if (append) { + BUG_ON(idx < 1); + ccw = &mxs_chan->ccw[idx - 1]; + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; + ccw->bits |= CCW_CHAIN; + ccw->bits &= ~CCW_IRQ; + ccw->bits &= ~CCW_DEC_SEM; + ccw->bits &= ~CCW_WAIT4END; + } else { + idx = 0; + } + + if (direction == DMA_NONE) { + ccw = &mxs_chan->ccw[idx++]; + pio = (u32 *) sgl; + + for (j = 0; j < sg_len;) + ccw->pio_words[j++] = *pio++; + + ccw->bits = 0; + ccw->bits |= CCW_IRQ; + ccw->bits |= CCW_DEC_SEM; + ccw->bits |= CCW_WAIT4END; + ccw->bits |= CCW_HALT_ON_TERM; + ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= BF_CCW(sg_len, PIO_NUM); + ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); + } else { + for_each_sg(sgl, sg, sg_len, i) { + if (sg->length > MAX_XFER_BYTES) { + dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", + sg->length, MAX_XFER_BYTES); + goto err_out; + } + + ccw = &mxs_chan->ccw[idx++]; + + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; + ccw->bufaddr = sg->dma_address; + ccw->xfer_bytes = sg->length; + + ccw->bits = 0; + ccw->bits |= CCW_CHAIN; + ccw->bits |= CCW_HALT_ON_TERM; + ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? + MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, + COMMAND); + + if (i + 1 == sg_len) { + ccw->bits &= ~CCW_CHAIN; + ccw->bits |= CCW_IRQ; + ccw->bits |= CCW_DEC_SEM; + ccw->bits |= CCW_WAIT4END; + } + } + } + + return &mxs_chan->desc; + +err_out: + mxs_chan->status = DMA_ERROR; + return NULL; +} + +static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, + size_t period_len, enum dma_data_direction direction) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int num_periods = buf_len / period_len; + int i = 0, buf = 0; + + if (mxs_chan->status == DMA_IN_PROGRESS) + return NULL; + + mxs_chan->status = DMA_IN_PROGRESS; + mxs_chan->flags |= MXS_DMA_SG_LOOP; + + if (num_periods > NUM_CCW) { + dev_err(mxs_dma->dma_device.dev, + "maximum number of sg exceeded: %d > %d\n", + num_periods, NUM_CCW); + goto err_out; + } + + if (period_len > MAX_XFER_BYTES) { + dev_err(mxs_dma->dma_device.dev, + "maximum period size exceeded: %d > %d\n", + period_len, MAX_XFER_BYTES); + goto err_out; + } + + while (buf < buf_len) { + struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i]; + + if (i + 1 == num_periods) + ccw->next = mxs_chan->ccw_phys; + else + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1); + + ccw->bufaddr = dma_addr; + ccw->xfer_bytes = period_len; + + ccw->bits = 0; + ccw->bits |= CCW_CHAIN; + ccw->bits |= CCW_IRQ; + ccw->bits |= CCW_HALT_ON_TERM; + ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? 
+ MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); + + dma_addr += period_len; + buf += period_len; + + i++; + } + + return &mxs_chan->desc; + +err_out: + mxs_chan->status = DMA_ERROR; + return NULL; +} + +static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + int ret = 0; + + switch (cmd) { + case DMA_TERMINATE_ALL: + mxs_dma_disable_chan(mxs_chan); + break; + case DMA_PAUSE: + mxs_dma_pause_chan(mxs_chan); + break; + case DMA_RESUME: + mxs_dma_resume_chan(mxs_chan); + break; + default: + ret = -ENOSYS; + } + + return ret; +} + +static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + dma_cookie_t last_used; + + last_used = chan->cookie; + dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); + + return mxs_chan->status; +} + +static void mxs_dma_issue_pending(struct dma_chan *chan) +{ + /* + * Nothing to do. We only have a single descriptor. + */ +} + +static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) +{ + int ret; + + ret = clk_enable(mxs_dma->clk); + if (ret) + goto err_out; + + ret = mxs_reset_block(mxs_dma->base); + if (ret) + goto err_out; + + /* only major version matters */ + mxs_dma->version = readl(mxs_dma->base + + ((mxs_dma->dev_id == MXS_DMA_APBX) ? + HW_APBX_VERSION : HW_APBH_VERSION)) >> + BP_APBHX_VERSION_MAJOR; + + /* enable apbh burst */ + if (dma_is_apbh()) { + writel(BM_APBH_CTRL0_APB_BURST_EN, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + writel(BM_APBH_CTRL0_APB_BURST8_EN, + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); + } + + /* enable irq for all the channels */ + writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, + mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); + + clk_disable(mxs_dma->clk); + + return 0; + +err_out: + return ret; +} + +static int __init mxs_dma_probe(struct platform_device *pdev) +{ + const struct platform_device_id *id_entry = + platform_get_device_id(pdev); + struct mxs_dma_engine *mxs_dma; + struct resource *iores; + int ret, i; + + mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL); + if (!mxs_dma) + return -ENOMEM; + + mxs_dma->dev_id = id_entry->driver_data; + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!request_mem_region(iores->start, resource_size(iores), + pdev->name)) { + ret = -EBUSY; + goto err_request_region; + } + + mxs_dma->base = ioremap(iores->start, resource_size(iores)); + if (!mxs_dma->base) { + ret = -ENOMEM; + goto err_ioremap; + } + + mxs_dma->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(mxs_dma->clk)) { + ret = PTR_ERR(mxs_dma->clk); + goto err_clk; + } + + dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask); + dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask); + + INIT_LIST_HEAD(&mxs_dma->dma_device.channels); + + /* Initialize channel parameters */ + for (i = 0; i < MXS_DMA_CHANNELS; i++) { + struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; + + mxs_chan->mxs_dma = mxs_dma; + mxs_chan->chan.device = &mxs_dma->dma_device; + + tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, + (unsigned long) mxs_chan); + + + /* Add the channel to mxs_chan list */ + list_add_tail(&mxs_chan->chan.device_node, + &mxs_dma->dma_device.channels); + } + + ret = mxs_dma_init(mxs_dma); + if (ret) + goto err_init; + + mxs_dma->dma_device.dev = &pdev->dev; + + /* mxs_dma gets 65535 bytes maximum sg size */ + mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms; 
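Installing dma_parms here is what allows the dma_set_max_seg_size() call that follows to take effect, since the helper stores the limit in dev->dma_parms. Clients can then size their scatterlist entries against the controller generically; a minimal sketch (hypothetical helper):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static unsigned int mxs_client_seg_limit(struct dma_chan *chan)
{
	/* returns MAX_XFER_BYTES (0xff00) once the limit above is set */
	return dma_get_max_seg_size(chan->device->dev);
}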
+ dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES); + + mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources; + mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources; + mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; + mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; + mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; + mxs_dma->dma_device.device_control = mxs_dma_control; + mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; + + ret = dma_async_device_register(&mxs_dma->dma_device); + if (ret) { + dev_err(mxs_dma->dma_device.dev, "unable to register\n"); + goto err_init; + } + + dev_info(mxs_dma->dma_device.dev, "initialized\n"); + + return 0; + +err_init: + clk_put(mxs_dma->clk); +err_clk: + iounmap(mxs_dma->base); +err_ioremap: + release_mem_region(iores->start, resource_size(iores)); +err_request_region: + kfree(mxs_dma); + return ret; +} + +static struct platform_device_id mxs_dma_type[] = { + { + .name = "mxs-dma-apbh", + .driver_data = MXS_DMA_APBH, + }, { + .name = "mxs-dma-apbx", + .driver_data = MXS_DMA_APBX, + } +}; + +static struct platform_driver mxs_dma_driver = { + .driver = { + .name = "mxs-dma", + }, + .id_table = mxs_dma_type, +}; + +static int __init mxs_dma_module_init(void) +{ + return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); +} +subsys_initcall(mxs_dma_module_init); -- cgit v1.2.3 From f44ad7e91dd12bed0959b3e715f4f3ab84951a59 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:14 +0530 Subject: dw_dmac: Remove compilation dependency from AVR32 and put on HAVE_CLK This driver will now be used in atleast two platforms AVR32 & ARM. And there is no actual hardware dependency of this driver over AVR32 or ARM. So this dependency can be removed altogether. Also dw_dmac driver uses clk framework and must have compilation dependency on HAVE_CLK Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 76f6472ba31..d700895cb40 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -82,7 +82,7 @@ config INTEL_IOP_ADMA config DW_DMAC tristate "Synopsys DesignWare AHB DMA support" - depends on AVR32 + depends on HAVE_CLK select DMA_ENGINE default y if CPU_AT32AP7000 help -- cgit v1.2.3 From cb689a706d17ef19a61735670ded60466dd015fa Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:15 +0530 Subject: dw_dmac: Replace module_init() with subsys_initcall() In some cases users of dw_dmac are initialized before dw_dmac, and if they try to use dw_dmac, they simply fail. So its better we register init() routine of driver using subsys_initcall() instead of module_init(), so that dma driver is available at the earliest possible. 
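Initcalls run in numbered levels, and subsys_initcall() (level 4) executes before the device_initcall()/module_init() level (6) used by most client drivers, so the DMA channels are already registered when those clients probe. The change is just a different registration macro; a self-contained sketch of the pattern with placeholder driver names:

#include <linux/init.h>
#include <linux/platform_device.h>

static int __init example_dmac_probe(struct platform_device *pdev)
{
	/* register the dma_device, request irqs, etc. */
	return 0;
}

static struct platform_driver example_dmac_driver = {
	.driver = {
		.name = "example-dmac",
	},
};

static int __init example_dmac_init(void)
{
	return platform_driver_probe(&example_dmac_driver, example_dmac_probe);
}
/* subsys level runs ahead of module_init() users, as dw_init() does below */
subsys_initcall(example_dmac_init);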
Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 08dab3badad..064a1830a76 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1455,7 +1455,7 @@ static int __init dw_init(void) { return platform_driver_probe(&dw_driver, dw_probe); } -module_init(dw_init); +subsys_initcall(dw_init); static void __exit dw_exit(void) { -- cgit v1.2.3 From f336e42f73d93b74fd21bf9176ee6c7ab8b195c5 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:16 +0530 Subject: dw_dmac: Move single descriptor from dwc->queue to dwc->active_list in dwc_complete_all dwc_complete_all and other routines were removing all descriptors from dwc->queue and pushing them to dwc->active_list, while only one was required to be removed. Also, dwc_dostart is now called once the list is fixed. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 064a1830a76..942b50f57f2 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -87,11 +87,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) return list_entry(dwc->active_list.next, struct dw_desc, desc_node); } -static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) -{ - return list_entry(dwc->queue.next, struct dw_desc, desc_node); -} - static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) { struct dw_desc *desc, *_desc; @@ -262,10 +257,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) * Submit queued descriptors ASAP, i.e. before we go through * the completed ones.
*/ - if (!list_empty(&dwc->queue)) - dwc_dostart(dwc, dwc_first_queued(dwc)); list_splice_init(&dwc->active_list, &list); - list_splice_init(&dwc->queue, &dwc->active_list); + if (!list_empty(&dwc->queue)) { + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); + } list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_descriptor_complete(dwc, desc); @@ -325,8 +321,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) cpu_relax(); if (!list_empty(&dwc->queue)) { - dwc_dostart(dwc, dwc_first_queued(dwc)); - list_splice_init(&dwc->queue, &dwc->active_list); + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); } } @@ -352,7 +348,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) */ bad_desc = dwc_first_active(dwc); list_del_init(&bad_desc->desc_node); - list_splice_init(&dwc->queue, dwc->active_list.prev); + list_move(dwc->queue.next, dwc->active_list.prev); /* Clear the error flag and try to restart the controller */ dma_writel(dw, CLEAR.ERROR, dwc->mask); @@ -547,8 +543,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) if (list_empty(&dwc->active_list)) { dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", desc->txd.cookie); - dwc_dostart(dwc, desc); list_add_tail(&desc->desc_node, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); } else { dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", desc->txd.cookie); -- cgit v1.2.3 From 569432efa7975f5795efb8142134f5a098942381 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:17 +0530 Subject: dw_dmac: Calling dwc_scan_descriptors from dwc_tx_status() after taking lock The lock must be taken before calling dwc_scan_descriptors, as it may access/modify shared data and queues. dwc_tx_status wasn't taking the lock before calling this routine. This patch adds code that takes the lock before calling dwc_scan_descriptors. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 942b50f57f2..2b0d5e99ae7 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -836,7 +836,9 @@ dwc_tx_status(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret != DMA_SUCCESS) { + spin_lock_bh(&dwc->lock); dwc_scan_descriptors(to_dw_dma(chan->device), dwc); + spin_unlock_bh(&dwc->lock); last_complete = dwc->completed; last_used = chan->cookie; -- cgit v1.2.3 From a02274564dd78f7edde3c9ff197ed44f2f8a5a81 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:18 +0530 Subject: dw_dmac: Adding support for 64 bit access width for memcpy xfers Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 2b0d5e99ae7..e5d97bf9264 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -583,7 +583,9 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, * We can be a lot more clever here, but this should take care * of the most common optimization.
*/ - if (!((src | dest | len) & 3)) + if (!((src | dest | len) & 7)) + src_width = dst_width = 3; + else if (!((src | dest | len) & 3)) src_width = dst_width = 2; else if (!((src | dest | len) & 1)) src_width = dst_width = 1; -- cgit v1.2.3 From 418e74070662e1ae7d9bb5202f773d35c9a7f05e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 4 Mar 2011 15:42:50 +0530 Subject: dw_dmac: Change value of DWC_MAX_COUNT to 4095. Every descriptor can transfer a maximum count of 4095 (12 bits, in the control register), so we must set DWC_MAX_COUNT to 4095 instead of 2048. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index e5d97bf9264..711ebe954f5 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -47,14 +47,13 @@ /* * This is configuration-dependent and usually a funny size like 4095. - * Let's round it down to the nearest power of two. * * Note that this is a transfer count, i.e. if we transfer 32-bit - * words, we can do 8192 bytes per descriptor. + * words, we can do 16380 bytes per descriptor. * * This parameter is also system-specific. */ -#define DWC_MAX_COUNT 2048U +#define DWC_MAX_COUNT 4095U /* * Number of descriptors to allocate for each channel. This should be -- cgit v1.2.3 From e518076ef8cb56adb558ff56ad5bfa0cd9f3abd9 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:20 +0530 Subject: dw_dmac: Mark all tx_descriptors with DMA_CTRL_ACK after xfer finish dwc_desc_get checks all descriptors for DMA_CTRL_ACK before allocating them for transfers, but descriptors are not marked with DMA_CTRL_ACK after the transfer finishes. Thus a descriptor, once used, is not usable again. This patch marks descriptors with DMA_CTRL_ACK after the DMA transfer finishes. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 711ebe954f5..6ab440e532b 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -198,6 +198,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) dma_async_tx_callback callback; void *param; struct dma_async_tx_descriptor *txd = &desc->txd; + struct dw_desc *child; dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); @@ -206,6 +207,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) param = txd->callback_param; dwc_sync_desc_for_cpu(dwc, desc); + + /* async_tx_ack */ + list_for_each_entry(child, &desc->tx_list, desc_node) + async_tx_ack(&child->txd); + async_tx_ack(&desc->txd); + list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); -- cgit v1.2.3 From b0c3130d69bda5cd91aa3b3f08e7878df49fde69 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:21 +0530 Subject: dw_dmac: Pass Channel Allocation Order from platform_data On the SPEAr platform, channels 4-7 have more FIFO depth, so we must get the better channels first. This patch introduces the concept of channel allocation order in dw_dmac. If the user doesn't pass anything or passes 0, then normal (ascending) channel allocation will follow; otherwise channels will be allocated in descending order.
Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 6 +++++- include/linux/dw_dmac.h | 3 +++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 6ab440e532b..f413e123405 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1319,7 +1319,11 @@ static int __init dw_probe(struct platform_device *pdev) dwc->chan.device = &dw->dma; dwc->chan.cookie = dwc->completed = 1; dwc->chan.chan_id = i; - list_add_tail(&dwc->chan.device_node, &dw->dma.channels); + if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) + list_add_tail(&dwc->chan.device_node, + &dw->dma.channels); + else + list_add(&dwc->chan.device_node, &dw->dma.channels); dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; spin_lock_init(&dwc->lock); diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index deec66b3718..a18c498984d 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -22,6 +22,9 @@ struct dw_dma_platform_data { unsigned int nr_channels; bool is_private; +#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ +#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ + unsigned char chan_allocation_order; }; /** -- cgit v1.2.3 From 93317e8e35b77633d589fe0e132291195757d785 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:22 +0530 Subject: dw_dmac: Pass Channel Priority from platform_data In Synopsys designware, channel priority is programmable. This patch adds support for passing channel priority through platform data. By default Ascending channel priority will be followed, i.e. channel 0 will get highest priority and channel 7 will get lowest. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 11 ++++++++++- drivers/dma/dw_dmac_regs.h | 3 +++ include/linux/dw_dmac.h | 4 +++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index f413e123405..318a342fc7e 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -901,8 +901,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); cfghi = dws->cfg_hi; - cfglo = dws->cfg_lo; + cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; } + + cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority); + channel_writel(dwc, CFG_LO, cfglo); channel_writel(dwc, CFG_HI, cfghi); @@ -1325,6 +1328,12 @@ static int __init dw_probe(struct platform_device *pdev) else list_add(&dwc->chan.device_node, &dw->dma.channels); + /* 7 is highest priority & 0 is lowest. */ + if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) + dwc->priority = 7 - i; + else + dwc->priority = i; + dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; spin_lock_init(&dwc->lock); dwc->mask = 1 << i; diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index d9a939f67f4..6a8e6d35f35 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -101,6 +101,8 @@ struct dw_dma_regs { #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff /* Bitfields in CFG_LO. 
Platform-configurable bits are in */ +#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ +#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ @@ -134,6 +136,7 @@ struct dw_dma_chan { struct dma_chan chan; void __iomem *ch_regs; u8 mask; + u8 priority; spinlock_t lock; diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index a18c498984d..64c76da571e 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -25,6 +25,9 @@ struct dw_dma_platform_data { #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ unsigned char chan_allocation_order; +#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ +#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ + unsigned char chan_priority; }; /** @@ -70,7 +73,6 @@ struct dw_dma_slave { #define DWC_CFGH_DST_PER(x) ((x) << 11) /* Platform-configurable bits in CFG_LO */ -#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */ #define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ #define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) #define DWC_CFGL_LOCK_CH_XACT (2 << 12) -- cgit v1.2.3 From 59c22fc11d12b69da36c6585a38229863ba0bb16 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:23 +0530 Subject: dw_dmac: Changing type of src_master and dest_master to u8. src_master & dest_master don't require u32 as their values are limited to the u8 range. Also, their descriptions are missing from the kernel-doc comment. This patch fixes the above mentioned issues. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- include/linux/dw_dmac.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 64c76da571e..3ba2f066ff4 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -53,6 +53,8 @@ enum dw_dma_slave_width { * @reg_width: peripheral register width * @cfg_hi: Platform-specific initializer for the CFG_HI register * @cfg_lo: Platform-specific initializer for the CFG_LO register + * @src_master: src master for transfers on allocated channel. + * @dst_master: dest master for transfers on allocated channel. */ struct dw_dma_slave { struct device *dma_dev; @@ -61,8 +63,8 @@ enum dw_dma_slave_width reg_width; u32 cfg_hi; u32 cfg_lo; - int src_master; - int dst_master; + u8 src_master; + u8 dst_master; }; /* Platform-configurable bits in CFG_HI */ -- cgit v1.2.3 From ee66509d7f354eecb45ac99f21ea6aa8650dea7e Mon Sep 17 00:00:00 2001 From: Viresh KUMAR Date: Fri, 4 Mar 2011 15:42:51 +0530 Subject: dw_dmac: Allow src/dst msize & flow controller to be configured at runtime Msize (burst size) is peripheral dependent in case of prep_slave_sg and cyclic_prep transfers, and platform dependent in case of memcpy transfers, so the msize configuration must come from platform data. Also, some peripherals (e.g. JPEG) need to be the flow controller for DMA transfers, so for slave_sg & cyclic_prep transfers this information must come from platform data.
Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 14 ++++++++------ drivers/dma/dw_dmac_regs.h | 1 + include/linux/dw_dmac.h | 30 ++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 318a342fc7e..90ea08a53d6 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -36,9 +36,11 @@ struct dw_dma_slave *__slave = (private); \ int dms = __slave ? __slave->dst_master : 0; \ int sms = __slave ? __slave->src_master : 1; \ + u8 smsize = __slave ? __slave->src_msize : 0; \ + u8 dmsize = __slave ? __slave->dst_msize : 0; \ \ - (DWC_CTLL_DST_MSIZE(0) \ - | DWC_CTLL_SRC_MSIZE(0) \ + (DWC_CTLL_DST_MSIZE(dmsize) \ + | DWC_CTLL_SRC_MSIZE(smsize) \ | DWC_CTLL_LLP_D_EN \ | DWC_CTLL_LLP_S_EN \ | DWC_CTLL_DMS(dms) \ @@ -683,7 +685,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC - | DWC_CTLL_FC_M2P); + | DWC_CTLL_FC(dws->fc)); reg = dws->tx_reg; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; @@ -728,7 +730,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX - | DWC_CTLL_FC_P2M); + | DWC_CTLL_FC(dws->fc)); reg = dws->rx_reg; for_each_sg(sgl, sg, sg_len, i) { @@ -1146,7 +1148,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC - | DWC_CTLL_FC_M2P + | DWC_CTLL_FC(dws->fc) | DWC_CTLL_INT_EN); break; case DMA_FROM_DEVICE: @@ -1157,7 +1159,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX - | DWC_CTLL_FC_P2M + | DWC_CTLL_FC(dws->fc) | DWC_CTLL_INT_EN); break; default: diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 6a8e6d35f35..9a32964bf79 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -86,6 +86,7 @@ struct dw_dma_regs { #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ +#define DWC_CTLL_FC(n) ((n) << 20) #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 3ba2f066ff4..6998d9376ef 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -42,6 +42,30 @@ enum dw_dma_slave_width { DW_DMA_SLAVE_WIDTH_32BIT, }; +/* bursts size */ +enum dw_dma_msize { + DW_DMA_MSIZE_1, + DW_DMA_MSIZE_4, + DW_DMA_MSIZE_8, + DW_DMA_MSIZE_16, + DW_DMA_MSIZE_32, + DW_DMA_MSIZE_64, + DW_DMA_MSIZE_128, + DW_DMA_MSIZE_256, +}; + +/* flow controller */ +enum dw_dma_fc { + DW_DMA_FC_D_M2M, + DW_DMA_FC_D_M2P, + DW_DMA_FC_D_P2M, + DW_DMA_FC_D_P2P, + DW_DMA_FC_P_P2M, + DW_DMA_FC_SP_P2P, + DW_DMA_FC_P_M2P, + DW_DMA_FC_DP_P2P, +}; + /** * struct dw_dma_slave - Controller-specific information about a slave * @@ -55,6 +79,9 @@ enum dw_dma_slave_width { * @cfg_lo: Platform-specific initializer for the CFG_LO register * @src_master: src master for transfers on allocated channel. * @dst_master: dest master for transfers on allocated channel. + * @src_msize: src burst size. + * @dst_msize: dest burst size. 
+ * @fc: flow controller for DMA transfer */ struct dw_dma_slave { struct device *dma_dev; @@ -65,6 +92,9 @@ struct dw_dma_slave { u32 cfg_lo; u8 src_master; u8 dst_master; + u8 src_msize; + u8 dst_msize; + u8 fc; }; /* Platform-configurable bits in CFG_HI */ -- cgit v1.2.3 From e51dc53b8c7fa2d9ac4ef8f317f5dfe07a79e65a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:25 +0530 Subject: dw_dmac: Setting Default Burst length for transfers as 16. This patch sets default Burst length for all transfer to 16. This will enhance performance when user doesn't have any chan->private data. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 90ea08a53d6..9c25c7d099e 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -36,8 +36,8 @@ struct dw_dma_slave *__slave = (private); \ int dms = __slave ? __slave->dst_master : 0; \ int sms = __slave ? __slave->src_master : 1; \ - u8 smsize = __slave ? __slave->src_msize : 0; \ - u8 dmsize = __slave ? __slave->dst_msize : 0; \ + u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \ + u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \ \ (DWC_CTLL_DST_MSIZE(dmsize) \ | DWC_CTLL_SRC_MSIZE(smsize) \ -- cgit v1.2.3 From 1c5b0538c719f52cface39f699fb5d39a50149d6 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 3 Mar 2011 15:47:26 +0530 Subject: avr32: at32ap700x: Specify DMA Flow Controller, Src and Dst msize Now that the dw_dmac DMA driver supports configurable Flow Controller, source and destination burst or msize, we need to specify which ones to use. Msize or burst size was previously hardcoded to 1, Flow controller was DMA for both M2P & P2M transfers. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- arch/avr32/mach-at32ap/at32ap700x.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index 2747cde8c9a..b4aaebd8780 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -2050,6 +2050,9 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); rx_dws->src_master = 0; rx_dws->dst_master = 1; + rx_dws->src_msize = DW_DMA_MSIZE_1; + rx_dws->dst_msize = DW_DMA_MSIZE_1; + rx_dws->fc = DW_DMA_FC_D_P2M; } /* Check if DMA slave interface for playback should be configured. 
*/ @@ -2060,6 +2063,9 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); rx_dws->src_master = 0; rx_dws->dst_master = 1; + tx_dws->src_msize = DW_DMA_MSIZE_1; + tx_dws->dst_msize = DW_DMA_MSIZE_1; + tx_dws->fc = DW_DMA_FC_D_M2P; } if (platform_device_add_data(pdev, data, @@ -2134,6 +2140,9 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data) dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); dws->src_master = 0; dws->dst_master = 1; + dws->src_msize = DW_DMA_MSIZE_1; + dws->dst_msize = DW_DMA_MSIZE_1; + dws->fc = DW_DMA_FC_D_M2P; if (platform_device_add_data(pdev, data, sizeof(struct atmel_abdac_pdata))) -- cgit v1.2.3 From 29782da5f0206335e2325508ba4fee0d624ddab6 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 4 Mar 2011 14:58:32 +0530 Subject: dmaengine/dw_dmac fix: use readl & writel instead of __raw_readl & __raw_writel On ARMv7 cores, device memory mapped as Normal Non-cacheable, may not guarantee ordered access causing failures in device drivers that do not use the mandatory memory barriers. readl & writel versions contain necessary memory barriers for this. commit 79f64dbf68c8a9779a7e9a25e0a9f0217a25b57a: "ARM: 6273/1: Add barriers to the I/O accessors if ARM_DMA_MEM_BUFFERABLE" can be referred for more information on this. Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw_dmac_regs.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 9a32964bf79..720f821527f 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -159,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc) } #define channel_readl(dwc, name) \ - __raw_readl(&(__dwc_regs(dwc)->name)) + readl(&(__dwc_regs(dwc)->name)) #define channel_writel(dwc, name, val) \ - __raw_writel((val), &(__dwc_regs(dwc)->name)) + writel((val), &(__dwc_regs(dwc)->name)) static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) { @@ -185,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) } #define dma_readl(dw, name) \ - __raw_readl(&(__dw_regs(dw)->name)) + readl(&(__dw_regs(dw)->name)) #define dma_writel(dw, name, val) \ - __raw_writel((val), &(__dw_regs(dw)->name)) + writel((val), &(__dw_regs(dw)->name)) #define channel_set_bit(dw, reg, mask) \ dma_writel(dw, reg, ((mask) << 8) | (mask)) -- cgit v1.2.3 From 0b863b333f529c7ddd8bee58e6696a7254417a05 Mon Sep 17 00:00:00 2001 From: Rakib Mullick Date: Sun, 6 Mar 2011 17:26:10 +0600 Subject: drivers, pch_dma: Fix warning when CONFIG_PM=n. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_PM=n, we get the following warning: drivers/dma/pch_dma.c:741: warning: ‘pch_dma_suspend’ defined but not used drivers/dma/pch_dma.c:755: warning: ‘pch_dma_resume’ defined but not used To fix it, wrap pch_dma_{suspend,resume} and pch_dma_{save,restore}_regs functions with CONFIG_PM. 
Signed-off-by: Rakib Mullick Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index b1dfba7e37b..8d8fef1480a 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -693,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid) return ret; } +#ifdef CONFIG_PM static void pch_dma_save_regs(struct pch_dma *pd) { struct pch_dma_chan *pd_chan; @@ -770,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev) return 0; } +#endif static int __devinit pch_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) -- cgit v1.2.3 From b203bd3f6b9c3db3b1979c2ff79bb2b9be8f03a3 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:54:53 +0000 Subject: dmatest: fix automatic buffer unmap type The dmatest code relies on the DMAEngine API to automatically call dma_unmap_single() on src buffers. The flags it passes are incorrect, fix them. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 5589358b684..7e1b0aa0ca5 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -285,7 +285,12 @@ static int dmatest_func(void *data) set_user_nice(current, 10); - flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; + /* + * src buffers are freed by the DMAEngine code with dma_unmap_single() + * dst buffers are freed by ourselves below + */ + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT + | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; while (!kthread_should_stop() && !(iterations && total_tests >= iterations)) { -- cgit v1.2.3 From e8bd84df27c5921a9ac866aef06e044590ac118f Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:54:54 +0000 Subject: fsldma: move related helper functions near each other This is a purely cosmetic cleanup. It is nice to have related functions right next to each other in the code. Signed-off-by: Ira W. 
Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 116 ++++++++++++++++++++++++++++----------------------- 1 file changed, 64 insertions(+), 52 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 4de947a450f..2e1af4555b0 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -39,33 +39,9 @@ static const char msg_ld_oom[] = "No free memory for link descriptor\n"; -static void dma_init(struct fsldma_chan *chan) -{ - /* Reset the channel */ - DMA_OUT(chan, &chan->regs->mr, 0, 32); - - switch (chan->feature & FSL_DMA_IP_MASK) { - case FSL_DMA_IP_85XX: - /* Set the channel to below modes: - * EIE - Error interrupt enable - * EOSIE - End of segments interrupt enable (basic mode) - * EOLNIE - End of links interrupt enable - * BWC - Bandwidth sharing among channels - */ - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC - | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE - | FSL_DMA_MR_EOSIE, 32); - break; - case FSL_DMA_IP_83XX: - /* Set the channel to below modes: - * EOTIE - End-of-transfer interrupt enable - * PRC_RM - PCI read multiple - */ - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE - | FSL_DMA_MR_PRC_RM, 32); - break; - } -} +/* + * Register Helpers + */ static void set_sr(struct fsldma_chan *chan, u32 val) { @@ -77,6 +53,30 @@ static u32 get_sr(struct fsldma_chan *chan) return DMA_IN(chan, &chan->regs->sr, 32); } +static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) +{ + DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); +} + +static dma_addr_t get_cdar(struct fsldma_chan *chan) +{ + return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; +} + +static dma_addr_t get_ndar(struct fsldma_chan *chan) +{ + return DMA_IN(chan, &chan->regs->ndar, 64); +} + +static u32 get_bcr(struct fsldma_chan *chan) +{ + return DMA_IN(chan, &chan->regs->bcr, 32); +} + +/* + * Descriptor Helpers + */ + static void set_desc_cnt(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, u32 count) { @@ -113,24 +113,49 @@ static void set_desc_next(struct fsldma_chan *chan, hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); } -static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) +static void set_ld_eol(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) { - DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); -} + u64 snoop_bits; -static dma_addr_t get_cdar(struct fsldma_chan *chan) -{ - return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; -} + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + ? 
FSL_DMA_SNEN : 0; -static dma_addr_t get_ndar(struct fsldma_chan *chan) -{ - return DMA_IN(chan, &chan->regs->ndar, 64); + desc->hw.next_ln_addr = CPU_TO_DMA(chan, + DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL + | snoop_bits, 64); } -static u32 get_bcr(struct fsldma_chan *chan) +/* + * DMA Engine Hardware Control Helpers + */ + +static void dma_init(struct fsldma_chan *chan) { - return DMA_IN(chan, &chan->regs->bcr, 32); + /* Reset the channel */ + DMA_OUT(chan, &chan->regs->mr, 0, 32); + + switch (chan->feature & FSL_DMA_IP_MASK) { + case FSL_DMA_IP_85XX: + /* Set the channel to below modes: + * EIE - Error interrupt enable + * EOSIE - End of segments interrupt enable (basic mode) + * EOLNIE - End of links interrupt enable + * BWC - Bandwidth sharing among channels + */ + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC + | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE + | FSL_DMA_MR_EOSIE, 32); + break; + case FSL_DMA_IP_83XX: + /* Set the channel to below modes: + * EOTIE - End-of-transfer interrupt enable + * PRC_RM - PCI read multiple + */ + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE + | FSL_DMA_MR_PRC_RM, 32); + break; + } } static int dma_is_idle(struct fsldma_chan *chan) @@ -185,19 +210,6 @@ static void dma_halt(struct fsldma_chan *chan) dev_err(chan->dev, "DMA halt timeout!\n"); } -static void set_ld_eol(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - u64 snoop_bits; - - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) - ? FSL_DMA_SNEN : 0; - - desc->hw.next_ln_addr = CPU_TO_DMA(chan, - DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL - | snoop_bits, 64); -} - /** * fsl_chan_set_src_loop_size - Set source address hold transfer size * @chan : Freescale DMA channel -- cgit v1.2.3 From b158471ef63bf399165db96e945a828096502d9d Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:54:55 +0000 Subject: fsldma: use channel name in printk output This makes debugging the driver much easier when multiple channels are running concurrently. In addition, you can see how much descriptor memory each channel has allocated via the dmapool API in sysfs. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 69 ++++++++++++++++++++++++++-------------------------- drivers/dma/fsldma.h | 1 + 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 2e1af4555b0..e535cd13f7c 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -37,7 +37,12 @@ #include "fsldma.h" -static const char msg_ld_oom[] = "No free memory for link descriptor\n"; +#define chan_dbg(chan, fmt, arg...) \ + dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) +#define chan_err(chan, fmt, arg...) \ + dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) + +static const char msg_ld_oom[] = "No free memory for link descriptor"; /* * Register Helpers @@ -207,7 +212,7 @@ static void dma_halt(struct fsldma_chan *chan) } if (!dma_is_idle(chan)) - dev_err(chan->dev, "DMA halt timeout!\n"); + chan_err(chan, "DMA halt timeout!\n"); } /** @@ -405,7 +410,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); if (!desc) { - dev_dbg(chan->dev, "out of memory for link desc\n"); + chan_dbg(chan, "out of memory for link descriptor\n"); return NULL; } @@ -439,13 +444,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) * We need the descriptor to be aligned to 32bytes * for meeting FSL DMA specification requirement. 
*/ - chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", - chan->dev, + chan->desc_pool = dma_pool_create(chan->name, chan->dev, sizeof(struct fsl_desc_sw), __alignof__(struct fsl_desc_sw), 0); if (!chan->desc_pool) { - dev_err(chan->dev, "unable to allocate channel %d " - "descriptor pool\n", chan->id); + chan_err(chan, "unable to allocate descriptor pool\n"); return -ENOMEM; } @@ -491,7 +494,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan) struct fsldma_chan *chan = to_fsl_chan(dchan); unsigned long flags; - dev_dbg(chan->dev, "Free all channel resources.\n"); + chan_dbg(chan, "free all channel resources\n"); spin_lock_irqsave(&chan->desc_lock, flags); fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); @@ -514,7 +517,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(chan->dev, msg_ld_oom); + chan_err(chan, "%s\n", msg_ld_oom); return NULL; } @@ -551,11 +554,11 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( /* Allocate the link descriptor from DMA pool */ new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(chan->dev, msg_ld_oom); + chan_err(chan, "%s\n", msg_ld_oom); goto fail; } #ifdef FSL_DMA_LD_DEBUG - dev_dbg(chan->dev, "new link desc alloc %p\n", new); + chan_dbg(chan, "new link desc alloc %p\n", new); #endif copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); @@ -639,11 +642,11 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, /* allocate and populate the descriptor */ new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(chan->dev, msg_ld_oom); + chan_err(chan, "%s\n", msg_ld_oom); goto fail; } #ifdef FSL_DMA_LD_DEBUG - dev_dbg(chan->dev, "new link desc alloc %p\n", new); + chan_dbg(chan, "new link desc alloc %p\n", new); #endif set_desc_cnt(chan, &new->hw, len); @@ -815,7 +818,7 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) spin_lock_irqsave(&chan->desc_lock, flags); if (list_empty(&chan->ld_running)) { - dev_dbg(chan->dev, "no running descriptors\n"); + chan_dbg(chan, "no running descriptors\n"); goto out_unlock; } @@ -863,7 +866,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) spin_lock_irqsave(&chan->desc_lock, flags); - dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); + chan_dbg(chan, "chan completed_cookie = %d\n", chan->completed_cookie); list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { dma_async_tx_callback callback; void *callback_param; @@ -879,7 +882,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) callback_param = desc->async_tx.callback_param; if (callback) { spin_unlock_irqrestore(&chan->desc_lock, flags); - dev_dbg(chan->dev, "LD %p callback\n", desc); + chan_dbg(chan, "LD %p callback\n", desc); callback(callback_param); spin_lock_irqsave(&chan->desc_lock, flags); } @@ -913,7 +916,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) * don't need to do any work at all */ if (list_empty(&chan->ld_pending)) { - dev_dbg(chan->dev, "no pending LDs\n"); + chan_dbg(chan, "no pending LDs\n"); goto out_unlock; } @@ -923,7 +926,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) * at the end of the current transaction */ if (!dma_is_idle(chan)) { - dev_dbg(chan->dev, "DMA controller still busy\n"); + chan_dbg(chan, "DMA controller still busy\n"); goto out_unlock; } @@ -1003,14 +1006,14 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) 
/* save and clear the status register */ stat = get_sr(chan); set_sr(chan, stat); - dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); + chan_dbg(chan, "irq: stat = 0x%x\n", stat); stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); if (!stat) return IRQ_NONE; if (stat & FSL_DMA_SR_TE) - dev_err(chan->dev, "Transfer Error!\n"); + chan_err(chan, "Transfer Error!\n"); /* * Programming Error @@ -1018,7 +1021,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) * triger a PE interrupt. */ if (stat & FSL_DMA_SR_PE) { - dev_dbg(chan->dev, "irq: Programming Error INT\n"); + chan_dbg(chan, "irq: Programming Error INT\n"); if (get_bcr(chan) == 0) { /* BCR register is 0, this is a DMA_INTERRUPT async_tx. * Now, update the completed cookie, and continue the @@ -1035,8 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) * we will recycle the used descriptor. */ if (stat & FSL_DMA_SR_EOSI) { - dev_dbg(chan->dev, "irq: End-of-segments INT\n"); - dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", + chan_dbg(chan, "irq: End-of-segments INT\n"); + chan_dbg(chan, "irq: clndar 0x%llx, nlndar 0x%llx\n", (unsigned long long)get_cdar(chan), (unsigned long long)get_ndar(chan)); stat &= ~FSL_DMA_SR_EOSI; @@ -1048,7 +1051,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) * and start the next transfer if it exist. */ if (stat & FSL_DMA_SR_EOCDI) { - dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); + chan_dbg(chan, "irq: End-of-Chain link INT\n"); stat &= ~FSL_DMA_SR_EOCDI; update_cookie = 1; xfer_ld_q = 1; @@ -1060,7 +1063,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) * prepare next transfer. */ if (stat & FSL_DMA_SR_EOLNI) { - dev_dbg(chan->dev, "irq: End-of-link INT\n"); + chan_dbg(chan, "irq: End-of-link INT\n"); stat &= ~FSL_DMA_SR_EOLNI; xfer_ld_q = 1; } @@ -1070,9 +1073,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) if (xfer_ld_q) fsl_chan_xfer_ld_queue(chan); if (stat) - dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); + chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat); - dev_dbg(chan->dev, "irq: Exit\n"); + chan_dbg(chan, "irq: Exit\n"); tasklet_schedule(&chan->tasklet); return IRQ_HANDLED; } @@ -1128,7 +1131,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev) for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { chan = fdev->chan[i]; if (chan && chan->irq != NO_IRQ) { - dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); + chan_dbg(chan, "free per-channel IRQ\n"); free_irq(chan->irq, chan); } } @@ -1155,19 +1158,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev) continue; if (chan->irq == NO_IRQ) { - dev_err(fdev->dev, "no interrupts property defined for " - "DMA channel %d. 
Please fix your " - "device tree\n", chan->id); + chan_err(chan, "interrupts property missing in device tree\n"); ret = -ENODEV; goto out_unwind; } - dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); + chan_dbg(chan, "request per-channel IRQ\n"); ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, "fsldma-chan", chan); if (ret) { - dev_err(fdev->dev, "unable to request IRQ for DMA " - "channel %d\n", chan->id); + chan_err(chan, "unable to request per-channel IRQ\n"); goto out_unwind; } } @@ -1242,6 +1242,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, fdev->chan[chan->id] = chan; tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); + snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); /* Initialize the channel */ dma_init(chan); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index ba9f403c0fb..113e7134010 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -135,6 +135,7 @@ struct fsldma_device { #define FSL_DMA_CHAN_START_EXT 0x00002000 struct fsldma_chan { + char name[8]; /* Channel name */ struct fsldma_chan_regs __iomem *regs; dma_cookie_t completed_cookie; /* The maximum cookie completed */ spinlock_t desc_lock; /* Descriptor operation lock */ -- cgit v1.2.3 From 0ab09c36818ca88f65c88f4d8c6d067fbf10578d Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:54:56 +0000 Subject: fsldma: improve link descriptor debugging This adds better tracking to link descriptor allocations, callbacks, and frees. This makes it much easier to track errors with link descriptors. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index e535cd13f7c..82b8e9f9c7b 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -420,6 +420,10 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( desc->async_tx.tx_submit = fsl_dma_tx_submit; desc->async_tx.phys = pdesc; +#ifdef FSL_DMA_LD_DEBUG + chan_dbg(chan, "LD %p allocated\n", desc); +#endif + return desc; } @@ -470,6 +474,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan, list_for_each_entry_safe(desc, _desc, list, node) { list_del(&desc->node); +#ifdef FSL_DMA_LD_DEBUG + chan_dbg(chan, "LD %p free\n", desc); +#endif dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } } @@ -481,6 +488,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, list_for_each_entry_safe_reverse(desc, _desc, list, node) { list_del(&desc->node); +#ifdef FSL_DMA_LD_DEBUG + chan_dbg(chan, "LD %p free\n", desc); +#endif dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } } @@ -557,9 +567,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( chan_err(chan, "%s\n", msg_ld_oom); goto fail; } -#ifdef FSL_DMA_LD_DEBUG - chan_dbg(chan, "new link desc alloc %p\n", new); -#endif copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); @@ -645,9 +652,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, chan_err(chan, "%s\n", msg_ld_oom); goto fail; } -#ifdef FSL_DMA_LD_DEBUG - chan_dbg(chan, "new link desc alloc %p\n", new); -#endif set_desc_cnt(chan, &new->hw, len); set_desc_src(chan, &new->hw, src); @@ -882,13 +886,18 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) callback_param = desc->async_tx.callback_param; if (callback) { spin_unlock_irqrestore(&chan->desc_lock, flags); +#ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p callback\n", 
desc); +#endif callback(callback_param); spin_lock_irqsave(&chan->desc_lock, flags); } /* Run any dependencies, then free the descriptor */ dma_run_dependencies(&desc->async_tx); +#ifdef FSL_DMA_LD_DEBUG + chan_dbg(chan, "LD %p free\n", desc); +#endif dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } -- cgit v1.2.3 From 31f4306c83a2daa3e348056b720de511bffe5a9b Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:54:57 +0000 Subject: fsldma: minor codingstyle and consistency fixes This fixes some minor violations of the coding style. It also changes the style of the device_prep_dma_*() function definitions so they are identical. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 29 +++++++++++++---------------- drivers/dma/fsldma.h | 4 ++-- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 82b8e9f9c7b..5da1a4a817e 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -89,7 +89,7 @@ static void set_desc_cnt(struct fsldma_chan *chan, } static void set_desc_src(struct fsldma_chan *chan, - struct fsl_dma_ld_hw *hw, dma_addr_t src) + struct fsl_dma_ld_hw *hw, dma_addr_t src) { u64 snoop_bits; @@ -99,7 +99,7 @@ static void set_desc_src(struct fsldma_chan *chan, } static void set_desc_dst(struct fsldma_chan *chan, - struct fsl_dma_ld_hw *hw, dma_addr_t dst) + struct fsl_dma_ld_hw *hw, dma_addr_t dst) { u64 snoop_bits; @@ -109,7 +109,7 @@ static void set_desc_dst(struct fsldma_chan *chan, } static void set_desc_next(struct fsldma_chan *chan, - struct fsl_dma_ld_hw *hw, dma_addr_t next) + struct fsl_dma_ld_hw *hw, dma_addr_t next) { u64 snoop_bits; @@ -118,8 +118,7 @@ static void set_desc_next(struct fsldma_chan *chan, hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); } -static void set_ld_eol(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) +static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { u64 snoop_bits; @@ -338,8 +337,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) chan->feature &= ~FSL_DMA_CHAN_START_EXT; } -static void append_ld_queue(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) +static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); @@ -380,8 +378,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) cookie = chan->common.cookie; list_for_each_entry(child, &desc->tx_list, node) { cookie++; - if (cookie < 0) - cookie = 1; + if (cookie < DMA_MIN_COOKIE) + cookie = DMA_MIN_COOKIE; child->async_tx.cookie = cookie; } @@ -402,8 +400,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) * * Return - The descriptor allocated. NULL for failed. */ -static struct fsl_desc_sw *fsl_dma_alloc_descriptor( - struct fsldma_chan *chan) +static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) { struct fsl_desc_sw *desc; dma_addr_t pdesc; @@ -427,7 +424,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( return desc; } - /** * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. 
* @chan : Freescale DMA channel @@ -537,14 +533,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) /* Insert the link descriptor to the LD ring */ list_add_tail(&new->node, &new->tx_list); - /* Set End-of-link to the last link descriptor of new list*/ + /* Set End-of-link to the last link descriptor of new list */ set_ld_eol(chan, new); return &new->async_tx; } -static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( - struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, +static struct dma_async_tx_descriptor * +fsl_dma_prep_memcpy(struct dma_chan *dchan, + dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, unsigned long flags) { struct fsldma_chan *chan; @@ -594,7 +591,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( new->async_tx.flags = flags; /* client is in control of this ack */ new->async_tx.cookie = -EBUSY; - /* Set End-of-link to the last link descriptor of new list*/ + /* Set End-of-link to the last link descriptor of new list */ set_ld_eol(chan, new); return &first->async_tx; diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 113e7134010..49189dacd5f 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -102,8 +102,8 @@ struct fsl_desc_sw { } __attribute__((aligned(32))); struct fsldma_chan_regs { - u32 mr; /* 0x00 - Mode Register */ - u32 sr; /* 0x04 - Status Register */ + u32 mr; /* 0x00 - Mode Register */ + u32 sr; /* 0x04 - Status Register */ u64 cdar; /* 0x08 - Current descriptor address register */ u64 sar; /* 0x10 - Source Address Register */ u64 dar; /* 0x18 - Destination Address Register */ -- cgit v1.2.3 From f04cd40701deace2efb9edd7120e59366bda2118 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:54:58 +0000 Subject: fsldma: fix controller lockups Enabling poisoning in the dmapool API quickly showed that the DMA controller was fetching descriptors that should not have been in use. This has caused intermittent controller lockups during testing. I have been unable to figure out the exact set of conditions which cause this to happen. However, I believe it is related to the driver using the hardware registers to track whether the controller is busy or not. The code can incorrectly decide that the hardware is idle due to lag between register writes and the hardware actually becoming busy. To fix this, the driver has been reworked to explicitly track the state of the hardware, rather than try to guess what it is doing based on the register values. This has passed dmatest with 10 threads per channel, 100000 iterations per thread several times without error. Previously, this would fail within a few seconds. Signed-off-by: Ira W. 
Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 220 +++++++++++++++++++++++---------------------------- drivers/dma/fsldma.h | 1 + 2 files changed, 99 insertions(+), 122 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 5da1a4a817e..6e9ad6edc4a 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -68,11 +68,6 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan) return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; } -static dma_addr_t get_ndar(struct fsldma_chan *chan) -{ - return DMA_IN(chan, &chan->regs->ndar, 64); -} - static u32 get_bcr(struct fsldma_chan *chan) { return DMA_IN(chan, &chan->regs->bcr, 32); @@ -143,13 +138,11 @@ static void dma_init(struct fsldma_chan *chan) case FSL_DMA_IP_85XX: /* Set the channel to below modes: * EIE - Error interrupt enable - * EOSIE - End of segments interrupt enable (basic mode) * EOLNIE - End of links interrupt enable * BWC - Bandwidth sharing among channels */ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC - | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE - | FSL_DMA_MR_EOSIE, 32); + | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32); break; case FSL_DMA_IP_83XX: /* Set the channel to below modes: @@ -168,25 +161,32 @@ static int dma_is_idle(struct fsldma_chan *chan) return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); } +/* + * Start the DMA controller + * + * Preconditions: + * - the CDAR register must point to the start descriptor + * - the MRn[CS] bit must be cleared + */ static void dma_start(struct fsldma_chan *chan) { u32 mode; mode = DMA_IN(chan, &chan->regs->mr, 32); - if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { - if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { - DMA_OUT(chan, &chan->regs->bcr, 0, 32); - mode |= FSL_DMA_MR_EMP_EN; - } else { - mode &= ~FSL_DMA_MR_EMP_EN; - } + if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { + DMA_OUT(chan, &chan->regs->bcr, 0, 32); + mode |= FSL_DMA_MR_EMP_EN; + } else { + mode &= ~FSL_DMA_MR_EMP_EN; } - if (chan->feature & FSL_DMA_CHAN_START_EXT) + if (chan->feature & FSL_DMA_CHAN_START_EXT) { mode |= FSL_DMA_MR_EMS_EN; - else + } else { + mode &= ~FSL_DMA_MR_EMS_EN; mode |= FSL_DMA_MR_CS; + } DMA_OUT(chan, &chan->regs->mr, mode, 32); } @@ -760,14 +760,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan, switch (cmd) { case DMA_TERMINATE_ALL: + spin_lock_irqsave(&chan->desc_lock, flags); + /* Halt the DMA engine */ dma_halt(chan); - spin_lock_irqsave(&chan->desc_lock, flags); - /* Remove and free all of the descriptors in the LD queue */ fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); + chan->idle = true; spin_unlock_irqrestore(&chan->desc_lock, flags); return 0; @@ -805,76 +806,43 @@ static int fsl_dma_device_control(struct dma_chan *dchan, } /** - * fsl_dma_update_completed_cookie - Update the completed cookie. + * fsl_chan_ld_cleanup - Clean up link descriptors * @chan : Freescale DMA channel * - * CONTEXT: hardirq + * This function is run after the queue of running descriptors has been + * executed by the DMA engine. It will run any callbacks, and then free + * the descriptors. 
+ * + * HARDWARE STATE: idle */ -static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) +static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) { - struct fsl_desc_sw *desc; + struct fsl_desc_sw *desc, *_desc; unsigned long flags; - dma_cookie_t cookie; spin_lock_irqsave(&chan->desc_lock, flags); + /* if the ld_running list is empty, there is nothing to do */ if (list_empty(&chan->ld_running)) { - chan_dbg(chan, "no running descriptors\n"); + chan_dbg(chan, "no descriptors to cleanup\n"); goto out_unlock; } - /* Get the last descriptor, update the cookie to that */ + /* + * Get the last descriptor, update the cookie to it + * + * This is done before callbacks run so that clients can check the + * status of their DMA transfer inside the callback. + */ desc = to_fsl_desc(chan->ld_running.prev); - if (dma_is_idle(chan)) - cookie = desc->async_tx.cookie; - else { - cookie = desc->async_tx.cookie - 1; - if (unlikely(cookie < DMA_MIN_COOKIE)) - cookie = DMA_MAX_COOKIE; - } - - chan->completed_cookie = cookie; - -out_unlock: - spin_unlock_irqrestore(&chan->desc_lock, flags); -} - -/** - * fsldma_desc_status - Check the status of a descriptor - * @chan: Freescale DMA channel - * @desc: DMA SW descriptor - * - * This function will return the status of the given descriptor - */ -static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - return dma_async_is_complete(desc->async_tx.cookie, - chan->completed_cookie, - chan->common.cookie); -} - -/** - * fsl_chan_ld_cleanup - Clean up link descriptors - * @chan : Freescale DMA channel - * - * This function clean up the ld_queue of DMA channel. - */ -static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) -{ - struct fsl_desc_sw *desc, *_desc; - unsigned long flags; - - spin_lock_irqsave(&chan->desc_lock, flags); + chan->completed_cookie = desc->async_tx.cookie; + chan_dbg(chan, "completed_cookie = %d\n", chan->completed_cookie); - chan_dbg(chan, "chan completed_cookie = %d\n", chan->completed_cookie); + /* Run the callback for each descriptor, in order */ list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { dma_async_tx_callback callback; void *callback_param; - if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) - break; - /* Remove from the list of running transactions */ list_del(&desc->node); @@ -898,6 +866,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } +out_unlock: spin_unlock_irqrestore(&chan->desc_lock, flags); } @@ -905,10 +874,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) * fsl_chan_xfer_ld_queue - transfer any pending transactions * @chan : Freescale DMA channel * - * This will make sure that any pending transactions will be run. - * If the DMA controller is idle, it will be started. Otherwise, - * the DMA controller's interrupt handler will start any pending - * transactions when it becomes idle. 
+ * HARDWARE STATE: idle */ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) { @@ -927,22 +893,15 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) } /* - * The DMA controller is not idle, which means the interrupt - * handler will start any queued transactions when it runs - * at the end of the current transaction + * The DMA controller is not idle, which means that the interrupt + * handler will start any queued transactions when it runs after + * this transaction finishes */ - if (!dma_is_idle(chan)) { + if (!chan->idle) { chan_dbg(chan, "DMA controller still busy\n"); goto out_unlock; } - /* - * TODO: - * make sure the dma_halt() function really un-wedges the - * controller as much as possible - */ - dma_halt(chan); - /* * If there are some link descriptors which have not been * transferred, we need to start the controller @@ -952,15 +911,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) * Move all elements from the queue of pending transactions * onto the list of running transactions */ + chan_dbg(chan, "idle, starting controller\n"); desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); list_splice_tail_init(&chan->ld_pending, &chan->ld_running); + /* + * The 85xx DMA controller doesn't clear the channel start bit + * automatically at the end of a transfer. Therefore we must clear + * it in software before starting the transfer. + */ + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { + u32 mode; + + mode = DMA_IN(chan, &chan->regs->mr, 32); + mode &= ~FSL_DMA_MR_CS; + DMA_OUT(chan, &chan->regs->mr, mode, 32); + } + /* * Program the descriptor's address into the DMA controller, * then start the DMA transaction */ set_cdar(chan, desc->async_tx.phys); + get_cdar(chan); + dma_start(chan); + chan->idle = false; out_unlock: spin_unlock_irqrestore(&chan->desc_lock, flags); @@ -985,16 +961,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, struct dma_tx_state *txstate) { struct fsldma_chan *chan = to_fsl_chan(dchan); - dma_cookie_t last_used; dma_cookie_t last_complete; + dma_cookie_t last_used; + unsigned long flags; - fsl_chan_ld_cleanup(chan); + spin_lock_irqsave(&chan->desc_lock, flags); - last_used = dchan->cookie; last_complete = chan->completed_cookie; + last_used = dchan->cookie; - dma_set_tx_state(txstate, last_complete, last_used, 0); + spin_unlock_irqrestore(&chan->desc_lock, flags); + dma_set_tx_state(txstate, last_complete, last_used, 0); return dma_async_is_complete(cookie, last_complete, last_used); } @@ -1005,8 +983,6 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, static irqreturn_t fsldma_chan_irq(int irq, void *data) { struct fsldma_chan *chan = data; - int update_cookie = 0; - int xfer_ld_q = 0; u32 stat; /* save and clear the status register */ @@ -1014,6 +990,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) set_sr(chan, stat); chan_dbg(chan, "irq: stat = 0x%x\n", stat); + /* check that this was really our device */ stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); if (!stat) return IRQ_NONE; @@ -1028,28 +1005,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) */ if (stat & FSL_DMA_SR_PE) { chan_dbg(chan, "irq: Programming Error INT\n"); - if (get_bcr(chan) == 0) { - /* BCR register is 0, this is a DMA_INTERRUPT async_tx. - * Now, update the completed cookie, and continue the - * next uncompleted transfer. 
- */ - update_cookie = 1; - xfer_ld_q = 1; - } stat &= ~FSL_DMA_SR_PE; - } - - /* - * If the link descriptor segment transfer finishes, - * we will recycle the used descriptor. - */ - if (stat & FSL_DMA_SR_EOSI) { - chan_dbg(chan, "irq: End-of-segments INT\n"); - chan_dbg(chan, "irq: clndar 0x%llx, nlndar 0x%llx\n", - (unsigned long long)get_cdar(chan), - (unsigned long long)get_ndar(chan)); - stat &= ~FSL_DMA_SR_EOSI; - update_cookie = 1; + if (get_bcr(chan) != 0) + chan_err(chan, "Programming Error!\n"); } /* @@ -1059,8 +1017,6 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) if (stat & FSL_DMA_SR_EOCDI) { chan_dbg(chan, "irq: End-of-Chain link INT\n"); stat &= ~FSL_DMA_SR_EOCDI; - update_cookie = 1; - xfer_ld_q = 1; } /* @@ -1071,25 +1027,44 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) if (stat & FSL_DMA_SR_EOLNI) { chan_dbg(chan, "irq: End-of-link INT\n"); stat &= ~FSL_DMA_SR_EOLNI; - xfer_ld_q = 1; } - if (update_cookie) - fsl_dma_update_completed_cookie(chan); - if (xfer_ld_q) - fsl_chan_xfer_ld_queue(chan); + /* check that the DMA controller is really idle */ + if (!dma_is_idle(chan)) + chan_err(chan, "irq: controller not idle!\n"); + + /* check that we handled all of the bits */ if (stat) - chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat); + chan_err(chan, "irq: unhandled sr 0x%08x\n", stat); - chan_dbg(chan, "irq: Exit\n"); + /* + * Schedule the tasklet to handle all cleanup of the current + * transaction. It will start a new transaction if there is + * one pending. + */ tasklet_schedule(&chan->tasklet); + chan_dbg(chan, "irq: Exit\n"); return IRQ_HANDLED; } static void dma_do_tasklet(unsigned long data) { struct fsldma_chan *chan = (struct fsldma_chan *)data; + unsigned long flags; + + chan_dbg(chan, "tasklet entry\n"); + + /* run all callbacks, free all used descriptors */ fsl_chan_ld_cleanup(chan); + + /* the channel is now idle */ + spin_lock_irqsave(&chan->desc_lock, flags); + chan->idle = true; + spin_unlock_irqrestore(&chan->desc_lock, flags); + + /* start any pending transactions automatically */ + fsl_chan_xfer_ld_queue(chan); + chan_dbg(chan, "tasklet exit\n"); } static irqreturn_t fsldma_ctrl_irq(int irq, void *data) @@ -1269,6 +1244,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, spin_lock_init(&chan->desc_lock); INIT_LIST_HEAD(&chan->ld_pending); INIT_LIST_HEAD(&chan->ld_running); + chan->idle = true; chan->common.device = &fdev->common; diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 49189dacd5f..9cb5aa57c67 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -148,6 +148,7 @@ struct fsldma_chan { int id; /* Raw id of this channel */ struct tasklet_struct tasklet; u32 feature; + bool idle; /* DMA controller is idle */ void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); -- cgit v1.2.3 From 9c4d1e7bdeb1ed4dc0c3341d40662a6fbc5f2dc2 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:54:59 +0000 Subject: fsldma: support async_tx dependencies and automatic unmapping Previous to this patch, the dma_run_dependencies() function has been called while holding desc_lock. This function can call tx_submit() for other descriptors, which may try to re-grab the lock. Avoid this by moving the descriptors to be cleaned up to a temporary list, and dropping the lock before cleanup. At the same time, add support for automatic unmapping of src and dst buffers, as offered by the DMAEngine API. Signed-off-by: Ira W. 
Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 131 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 95 insertions(+), 36 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 6e9ad6edc4a..526579df603 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -83,6 +83,11 @@ static void set_desc_cnt(struct fsldma_chan *chan, hw->count = CPU_TO_DMA(chan, count, 32); } +static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) +{ + return DMA_TO_CPU(chan, desc->hw.count, 32); +} + static void set_desc_src(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { @@ -93,6 +98,16 @@ static void set_desc_src(struct fsldma_chan *chan, hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); } +static dma_addr_t get_desc_src(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + u64 snoop_bits; + + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; + return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; +} + static void set_desc_dst(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t dst) { @@ -103,6 +118,16 @@ static void set_desc_dst(struct fsldma_chan *chan, hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); } +static dma_addr_t get_desc_dst(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + u64 snoop_bits; + + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; + return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; +} + static void set_desc_next(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { @@ -805,6 +830,57 @@ static int fsl_dma_device_control(struct dma_chan *dchan, return 0; } +/** + * fsldma_cleanup_descriptor - cleanup and free a single link descriptor + * @chan: Freescale DMA channel + * @desc: descriptor to cleanup and free + * + * This function is used on a descriptor which has been executed by the DMA + * controller. It will run any callbacks, submit any dependencies, and then + * free the descriptor. 
+ */ +static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + struct dma_async_tx_descriptor *txd = &desc->async_tx; + struct device *dev = chan->common.device->dev; + dma_addr_t src = get_desc_src(chan, desc); + dma_addr_t dst = get_desc_dst(chan, desc); + u32 len = get_desc_cnt(chan, desc); + + /* Run the link descriptor callback function */ + if (txd->callback) { +#ifdef FSL_DMA_LD_DEBUG + chan_dbg(chan, "LD %p callback\n", desc); +#endif + txd->callback(txd->callback_param); + } + + /* Run any dependencies */ + dma_run_dependencies(txd); + + /* Unmap the dst buffer, if requested */ + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); + else + dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); + } + + /* Unmap the src buffer, if requested */ + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(dev, src, len, DMA_TO_DEVICE); + else + dma_unmap_page(dev, src, len, DMA_TO_DEVICE); + } + +#ifdef FSL_DMA_LD_DEBUG + chan_dbg(chan, "LD %p free\n", desc); +#endif + dma_pool_free(chan->desc_pool, desc, txd->phys); +} + /** * fsl_chan_ld_cleanup - Clean up link descriptors * @chan : Freescale DMA channel @@ -818,56 +894,39 @@ static int fsl_dma_device_control(struct dma_chan *dchan, static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) { struct fsl_desc_sw *desc, *_desc; + LIST_HEAD(ld_cleanup); unsigned long flags; spin_lock_irqsave(&chan->desc_lock, flags); - /* if the ld_running list is empty, there is nothing to do */ - if (list_empty(&chan->ld_running)) { - chan_dbg(chan, "no descriptors to cleanup\n"); - goto out_unlock; + /* update the cookie if we have some descriptors to cleanup */ + if (!list_empty(&chan->ld_running)) { + dma_cookie_t cookie; + + desc = to_fsl_desc(chan->ld_running.prev); + cookie = desc->async_tx.cookie; + + chan->completed_cookie = cookie; + chan_dbg(chan, "completed cookie=%d\n", cookie); } /* - * Get the last descriptor, update the cookie to it - * - * This is done before callbacks run so that clients can check the - * status of their DMA transfer inside the callback. 
+ * move the descriptors to a temporary list so we can drop the lock + * during the entire cleanup operation */ - desc = to_fsl_desc(chan->ld_running.prev); - chan->completed_cookie = desc->async_tx.cookie; - chan_dbg(chan, "completed_cookie = %d\n", chan->completed_cookie); + list_splice_tail_init(&chan->ld_running, &ld_cleanup); + + spin_unlock_irqrestore(&chan->desc_lock, flags); /* Run the callback for each descriptor, in order */ - list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { - dma_async_tx_callback callback; - void *callback_param; + list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { - /* Remove from the list of running transactions */ + /* Remove from the list of transactions */ list_del(&desc->node); - /* Run the link descriptor callback function */ - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - if (callback) { - spin_unlock_irqrestore(&chan->desc_lock, flags); -#ifdef FSL_DMA_LD_DEBUG - chan_dbg(chan, "LD %p callback\n", desc); -#endif - callback(callback_param); - spin_lock_irqsave(&chan->desc_lock, flags); - } - - /* Run any dependencies, then free the descriptor */ - dma_run_dependencies(&desc->async_tx); -#ifdef FSL_DMA_LD_DEBUG - chan_dbg(chan, "LD %p free\n", desc); -#endif - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); + /* Run all cleanup for this descriptor */ + fsldma_cleanup_descriptor(chan, desc); } - -out_unlock: - spin_unlock_irqrestore(&chan->desc_lock, flags); } /** -- cgit v1.2.3 From dc8d4091575ba81e886ebcdfd1e559c981f82f86 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:55:00 +0000 Subject: fsldma: reduce locking during descriptor cleanup This merges the fsl_chan_ld_cleanup() function into the dma_do_tasklet() function to reduce locking overhead. In the best case, we will be able to keep the DMA controller busy while we are freeing used descriptors. In all cases, the spinlock is grabbed two times fewer than before on each transaction. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 108 ++++++++++++++++++++++----------------------------- 1 file changed, 46 insertions(+), 62 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 526579df603..d300de456c9 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -881,66 +881,16 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, dma_pool_free(chan->desc_pool, desc, txd->phys); } -/** - * fsl_chan_ld_cleanup - Clean up link descriptors - * @chan : Freescale DMA channel - * - * This function is run after the queue of running descriptors has been - * executed by the DMA engine. It will run any callbacks, and then free - * the descriptors. 
- * - * HARDWARE STATE: idle - */ -static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) -{ - struct fsl_desc_sw *desc, *_desc; - LIST_HEAD(ld_cleanup); - unsigned long flags; - - spin_lock_irqsave(&chan->desc_lock, flags); - - /* update the cookie if we have some descriptors to cleanup */ - if (!list_empty(&chan->ld_running)) { - dma_cookie_t cookie; - - desc = to_fsl_desc(chan->ld_running.prev); - cookie = desc->async_tx.cookie; - - chan->completed_cookie = cookie; - chan_dbg(chan, "completed cookie=%d\n", cookie); - } - - /* - * move the descriptors to a temporary list so we can drop the lock - * during the entire cleanup operation - */ - list_splice_tail_init(&chan->ld_running, &ld_cleanup); - - spin_unlock_irqrestore(&chan->desc_lock, flags); - - /* Run the callback for each descriptor, in order */ - list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { - - /* Remove from the list of transactions */ - list_del(&desc->node); - - /* Run all cleanup for this descriptor */ - fsldma_cleanup_descriptor(chan, desc); - } -} - /** * fsl_chan_xfer_ld_queue - transfer any pending transactions * @chan : Freescale DMA channel * * HARDWARE STATE: idle + * LOCKING: must hold chan->desc_lock */ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) { struct fsl_desc_sw *desc; - unsigned long flags; - - spin_lock_irqsave(&chan->desc_lock, flags); /* * If the list of pending descriptors is empty, then we @@ -948,7 +898,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) */ if (list_empty(&chan->ld_pending)) { chan_dbg(chan, "no pending LDs\n"); - goto out_unlock; + return; } /* @@ -958,7 +908,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) */ if (!chan->idle) { chan_dbg(chan, "DMA controller still busy\n"); - goto out_unlock; + return; } /* @@ -996,9 +946,6 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) dma_start(chan); chan->idle = false; - -out_unlock: - spin_unlock_irqrestore(&chan->desc_lock, flags); } /** @@ -1008,7 +955,11 @@ out_unlock: static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); + unsigned long flags; + + spin_lock_irqsave(&chan->desc_lock, flags); fsl_chan_xfer_ld_queue(chan); + spin_unlock_irqrestore(&chan->desc_lock, flags); } /** @@ -1109,20 +1060,53 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) static void dma_do_tasklet(unsigned long data) { struct fsldma_chan *chan = (struct fsldma_chan *)data; + struct fsl_desc_sw *desc, *_desc; + LIST_HEAD(ld_cleanup); unsigned long flags; chan_dbg(chan, "tasklet entry\n"); - /* run all callbacks, free all used descriptors */ - fsl_chan_ld_cleanup(chan); - - /* the channel is now idle */ spin_lock_irqsave(&chan->desc_lock, flags); + + /* update the cookie if we have some descriptors to cleanup */ + if (!list_empty(&chan->ld_running)) { + dma_cookie_t cookie; + + desc = to_fsl_desc(chan->ld_running.prev); + cookie = desc->async_tx.cookie; + + chan->completed_cookie = cookie; + chan_dbg(chan, "completed_cookie=%d\n", cookie); + } + + /* + * move the descriptors to a temporary list so we can drop the lock + * during the entire cleanup operation + */ + list_splice_tail_init(&chan->ld_running, &ld_cleanup); + + /* the hardware is now idle and ready for more */ chan->idle = true; - spin_unlock_irqrestore(&chan->desc_lock, flags); - /* start any pending transactions automatically */ + /* + * Start any pending transactions automatically + * + * In the ideal case, we keep the DMA controller busy while we go + * ahead 
and free the descriptors below. + */ fsl_chan_xfer_ld_queue(chan); + spin_unlock_irqrestore(&chan->desc_lock, flags); + + /* Run the callback for each descriptor, in order */ + list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { + + /* Remove from the list of transactions */ + list_del(&desc->node); + + /* Run all cleanup for this descriptor */ + fsldma_cleanup_descriptor(chan, desc); + } + chan_dbg(chan, "tasklet exit\n"); } -- cgit v1.2.3 From a00ae34ac8bc8a5897d9b6b9b685c39b955b14b9 Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Thu, 3 Mar 2011 07:55:01 +0000 Subject: fsldma: make halt behave nicely on all supported controllers The original dma_halt() function set the CA (channel abort) bit on both the 83xx and 85xx controllers. This is incorrect on the 83xx, where this bit means TEM (transfer error mask) instead. The 83xx doesn't support channel abort, so we only do this operation on 85xx. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index d300de456c9..8670a501212 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -221,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan) u32 mode; int i; + /* read the mode register */ mode = DMA_IN(chan, &chan->regs->mr, 32); - mode |= FSL_DMA_MR_CA; - DMA_OUT(chan, &chan->regs->mr, mode, 32); - mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); + /* + * The 85xx controller supports channel abort, which will stop + * the current transfer. On 83xx, this bit is the transfer error + * mask bit, which should not be changed. + */ + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { + mode |= FSL_DMA_MR_CA; + DMA_OUT(chan, &chan->regs->mr, mode, 32); + + mode &= ~FSL_DMA_MR_CA; + } + + /* stop the DMA controller */ + mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); DMA_OUT(chan, &chan->regs->mr, mode, 32); + /* wait for the DMA controller to become idle */ for (i = 0; i < 100; i++) { if (dma_is_idle(chan)) return; -- cgit v1.2.3 From 5b9a4f98b2e29fb92a4a54ef12b2e3940f941ed9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 22 Mar 2011 10:35:17 +0100 Subject: dma: let IMX_DMA depend on IMX_HAVE_DMA_V1 instead of an explicit list of SoCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As a side effect this makes IMX_DMA selectable on i.MX21 again, because the symbol ARCH_MX21 doesn't exist (MACH_MX21 would have been more correct). Signed-off-by: Uwe Kleine-König Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d700895cb40..a572600e44e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -221,7 +221,7 @@ config IMX_SDMA config IMX_DMA tristate "i.MX DMA support" - depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27 + depends on IMX_HAVE_DMA_V1 select DMA_ENGINE help Support the i.MX DMA engine. This engine is integrated into -- cgit v1.2.3 From d42efe6bfb4eed8314c8ce3547f21954a4140399 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 22 Mar 2011 17:27:25 +0530 Subject: dmaengine/dmatest: Pass timeout via module params When we try to test all channels present on our controller together, some channels of lower priority may be very slow as compared to others. If number of transfers is unlimited, some channels may timeout and will not finish within 3 seconds. 
Thus, while doing such regression testing we may need a higher timeout value. This patch adds support for passing the timeout value via a module parameter. The default value is 3000 msec (3 seconds); a negative value means the maximum possible timeout. Signed-off-by: Viresh Kumar Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 7e1b0aa0ca5..e0888cb538d 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO); MODULE_PARM_DESC(pq_sources, "Number of p+q source buffers (default: 3)"); +static int timeout = 3000; +module_param(timeout, uint, S_IRUGO); +MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \ + Pass -1 for infinite timeout"); + /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. @@ -299,7 +304,7 @@ static int dmatest_func(void *data) dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; struct completion cmp; - unsigned long tmo = msecs_to_jiffies(3000); + unsigned long tmo = msecs_to_jiffies(timeout); u8 align = 0; total_tests++; -- cgit v1.2.3 From 3ea205c449d2b5996d0256aa8b2894f7aea228a2 Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Tue, 22 Mar 2011 15:34:56 -0700 Subject: avr32: at32ap700x: fix typo in DMA master configuration Commit 4aa5f366431fe (avr32: at32ap700x: specify DMA src and dst masters) specified the masters for the ac97c playback device but incorrectly set them in the capture slave information rather than playback. Cc: Hans-Christian Egtvedt Reported-by: Nicolas Ferre Signed-off-by: Jamie Iles Acked-by: Vinod Koul [rebased on dmaengine for 2.6.39 (d42efe6b)] Signed-off-by: Dan Williams --- arch/avr32/mach-at32ap/at32ap700x.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index b4aaebd8780..bfc9d071db9 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -2061,8 +2061,8 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT; tx_dws->cfg_hi = DWC_CFGH_DST_PER(4); tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); - tx_dws->src_master = 0; - tx_dws->dst_master = 1; + tx_dws->src_master = 0; + tx_dws->dst_master = 1; tx_dws->src_msize = DW_DMA_MSIZE_1; tx_dws->dst_msize = DW_DMA_MSIZE_1; tx_dws->fc = DW_DMA_FC_D_M2P; -- cgit v1.2.3
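The descriptor-cleanup rework in the fsldma patches above revolves around one pattern: under the channel spinlock, record the last completed cookie, splice the finished descriptors onto a private list, drop the lock, and only then run the callbacks, so a callback that submits new work (and therefore re-takes the lock) cannot deadlock. Below is a minimal sketch of that pattern; the types and names (my_chan, my_desc, my_chan_cleanup) are placeholders for illustration, not the driver's own structures.

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Simplified stand-ins for the driver's channel and descriptor types. */
struct my_desc {
        struct list_head node;
        dma_cookie_t cookie;
        dma_async_tx_callback callback;
        void *callback_param;
};

struct my_chan {
        spinlock_t lock;
        struct list_head ld_running;    /* descriptors the hardware finished */
        dma_cookie_t completed_cookie;
};

static void my_chan_cleanup(struct my_chan *chan)
{
        struct my_desc *desc, *_desc;
        LIST_HEAD(ld_cleanup);
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        /* Publish progress before any callback can query it. */
        if (!list_empty(&chan->ld_running)) {
                desc = list_entry(chan->ld_running.prev, struct my_desc, node);
                chan->completed_cookie = desc->cookie;
        }

        /* Move everything to a private list so the lock can be dropped. */
        list_splice_tail_init(&chan->ld_running, &ld_cleanup);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* Run the callbacks lock-free; they may submit new descriptors. */
        list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
                list_del(&desc->node);
                if (desc->callback)
                        desc->callback(desc->callback_param);
                /* A real driver would also unmap buffers and free desc here. */
        }
}

The ordering matters: the cookie is updated before the callbacks run, so a client that checks its transfer status from inside its callback sees the transfer as complete, which is what the comment added to fsl_chan_ld_cleanup() states.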
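fsl_tx_status() reports progress by comparing the caller's cookie against the pair (completed_cookie, last_used), which is why the patch above reads both values together under desc_lock instead of forcing a cleanup pass first. The comparison has to tolerate cookie wrap-around. The helper below is an illustrative, wrap-aware version of that check written from the description above; it is a sketch, not a quotation of the dmaengine header.

#include <linux/dmaengine.h>

/*
 * Is 'cookie' outside the window of still-in-flight cookies
 * (last_complete, last_used]? If so, the transfer has completed.
 */
static enum dma_status my_is_complete(dma_cookie_t cookie,
                                      dma_cookie_t last_complete,
                                      dma_cookie_t last_used)
{
        if (last_complete <= last_used) {
                /* No wrap: in-flight cookies lie in (last_complete, last_used]. */
                if (cookie <= last_complete || cookie > last_used)
                        return DMA_SUCCESS;
        } else {
                /* Wrapped: the in-flight window spans the cookie maximum. */
                if (cookie <= last_complete && cookie > last_used)
                        return DMA_SUCCESS;
        }
        return DMA_IN_PROGRESS;
}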
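For the dmatest change above, the new parameter is set at module load time. Assuming the module is built as dmatest.ko and keeps the parameter name shown in the patch, something like "modprobe dmatest timeout=10000" gives each channel ten seconds per test iteration; the value is in milliseconds, and per the parameter description a value of -1 requests an effectively unlimited timeout.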