-rw-r--r--  Documentation/feature-removal-schedule.txt  |   11
-rw-r--r--  arch/arm/configs/omap2plus_defconfig         |    2
-rw-r--r--  arch/arm/mach-omap1/board-h2-mmc.c           |    1
-rw-r--r--  arch/arm/mach-omap1/board-h3-mmc.c           |    1
-rw-r--r--  arch/arm/mach-omap1/board-nokia770.c         |    1
-rw-r--r--  arch/arm/mach-omap2/board-n8x0.c             |    1
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c                  |    1
-rw-r--r--  arch/arm/mach-spear3xx/spear300.c            |   26
-rw-r--r--  arch/arm/mach-spear3xx/spear310.c            |   26
-rw-r--r--  arch/arm/mach-spear3xx/spear320.c            |   26
-rw-r--r--  arch/arm/mach-spear3xx/spear3xx.c            |    3
-rw-r--r--  arch/arm/mach-spear6xx/spear6xx.c            |   51
-rw-r--r--  arch/arm/plat-omap/include/plat/mmc.h        |    2
-rw-r--r--  arch/arm/plat-spear/include/plat/pl080.h     |    6
-rw-r--r--  arch/arm/plat-spear/pl080.c                  |   10
-rw-r--r--  drivers/dma/Kconfig                          |   11
-rw-r--r--  drivers/dma/Makefile                         |    2
-rw-r--r--  drivers/dma/amba-pl08x.c                     |  941
-rw-r--r--  drivers/dma/omap-dma.c                       |  669
-rw-r--r--  drivers/dma/sa11x0-dma.c                     |  388
-rw-r--r--  drivers/dma/virt-dma.c                       |  123
-rw-r--r--  drivers/dma/virt-dma.h                       |  152
-rw-r--r--  drivers/mmc/host/omap.c                      |  368
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c                |  204
-rw-r--r--  drivers/mtd/nand/omap2.c                     |  106
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c                |  229
-rw-r--r--  include/linux/amba/pl08x.h                   |  156
-rw-r--r--  include/linux/omap-dma.h                     |   22
28 files changed, 2124 insertions(+), 1415 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 72ed15075f7..afaff312bf4 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -626,3 +626,14 @@ Why: New drivers should use new V4L2_CAP_VIDEO_M2M capability flag
Who: Sylwester Nawrocki <s.nawrocki@samsung.com>
----------------------------
+
+What: OMAP private DMA implementation
+When: 2013
+Why: We have a DMA engine implementation; all users should be updated
+ to use this rather than persisting with the old APIs. The old APIs
+ block merging the old DMA engine implementation into the DMA
+ engine driver.
+Who: Russell King <linux@arm.linux.org.uk>,
+ Santosh Shilimkar <santosh.shilimkar@ti.com>
+
+----------------------------
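
For reference, moving an OMAP client off the private omap_request_dma() API and onto the DMA engine API follows the pattern sketched below. This is illustrative only: the request line number, buffer parameters and helper name are assumptions, not code from this series; omap_dma_filter_fn() is the filter exported by the new drivers/dma/omap-dma.c.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/omap-dma.h>	/* omap_dma_filter_fn(), added by this series */

static int example_start_rx_dma(struct scatterlist *sg, unsigned int sg_len,
				dma_addr_t dev_addr)
{
	struct dma_slave_config cfg = {
		.src_addr	= dev_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int req = 42;		/* hypothetical DMA request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* replaces omap_request_dma(req, ...) */
	chan = dma_request_channel(mask, omap_dma_filter_fn, &req);
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	tx = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* replaces omap_start_dma() */
	return 0;
}
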
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b152de79fd9..e58edc36b40 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -193,6 +193,8 @@ CONFIG_MMC_OMAP_HS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_TWL92330=y
CONFIG_RTC_DRV_TWL4030=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_OMAP=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c
index da0e37d4082..e1362ce4849 100644
--- a/arch/arm/mach-omap1/board-h2-mmc.c
+++ b/arch/arm/mach-omap1/board-h2-mmc.c
@@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
.init = mmc_late_init,
.cleanup = mmc_cleanup,
- .dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c
index f8242aa9b76..c74daace8cd 100644
--- a/arch/arm/mach-omap1/board-h3-mmc.c
+++ b/arch/arm/mach-omap1/board-h3-mmc.c
@@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on,
*/
static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
- .dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 4007a372481..2c0ca8fc338 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
static struct omap_mmc_platform_data nokia770_mmc2_data = {
.nr_slots = 1,
- .dma_mask = 0xffffffff,
.max_freq = 12000000,
.slots[0] = {
.set_power = nokia770_mmc_set_power,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 2c5d0ed7528..677357ff61a 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -468,7 +468,6 @@ static struct omap_mmc_platform_data mmc1_data = {
.cleanup = n8x0_mmc_cleanup,
.shutdown = n8x0_mmc_shutdown,
.max_freq = 24000000,
- .dma_mask = 0xffffffff,
.slots[0] = {
.wires = 4,
.set_power = n8x0_mmc_set_power,
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index be697d4e084..a9675d8d182 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
mmc->slots[0].caps = c->caps;
mmc->slots[0].pm_caps = c->pm_caps;
mmc->slots[0].internal_clock = !c->ext_clock;
- mmc->dma_mask = 0xffffffff;
mmc->max_freq = c->max_freq;
if (cpu_is_omap44xx())
mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index 0f882ecb7d8..6ec30054996 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -120,182 +120,156 @@ struct pl08x_channel_data spear300_dma_info[] = {
.min_signal = 2,
.max_signal = 2,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "irda",
.min_signal = 12,
.max_signal = 12,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "adc",
.min_signal = 13,
.max_signal = 13,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "to_jpeg",
.min_signal = 14,
.max_signal = 14,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "from_jpeg",
.min_signal = 15,
.max_signal = 15,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras0_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras0_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras1_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras1_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras2_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras2_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras3_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras3_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras4_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras4_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
},
};
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index bbcf4571d36..1d0e435b904 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -205,182 +205,156 @@ struct pl08x_channel_data spear310_dma_info[] = {
.min_signal = 2,
.max_signal = 2,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "irda",
.min_signal = 12,
.max_signal = 12,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "adc",
.min_signal = 13,
.max_signal = 13,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "to_jpeg",
.min_signal = 14,
.max_signal = 14,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "from_jpeg",
.min_signal = 15,
.max_signal = 15,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart1_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart1_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart2_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart2_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart3_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart3_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart4_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart4_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart5_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart5_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
},
};
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index 88d483bcd66..fd823c62457 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -213,182 +213,156 @@ struct pl08x_channel_data spear320_dma_info[] = {
.min_signal = 2,
.max_signal = 2,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c0_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c0_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "irda",
.min_signal = 12,
.max_signal = 12,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "adc",
.min_signal = 13,
.max_signal = 13,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "to_jpeg",
.min_signal = 14,
.max_signal = 14,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "from_jpeg",
.min_signal = 15,
.max_signal = 15,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp1_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp1_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp2_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp2_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart1_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart1_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart2_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart2_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2c1_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2c1_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2c2_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2c2_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "rs485_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "rs485_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
},
};
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 66db5f13af8..98144baf888 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -46,7 +46,8 @@ struct pl022_ssp_controller pl022_plat_data = {
struct pl08x_platform_data pl080_plat_data = {
.memcpy_channel = {
.bus_id = "memcpy",
- .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
+ .cctl_memcpy =
+ (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index 9af67d003c6..5a5a52db252 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -36,336 +36,288 @@ static struct pl08x_channel_data spear600_dma_info[] = {
.min_signal = 0,
.max_signal = 0,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp1_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart1_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart1_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp2_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp2_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp0_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "irda",
.min_signal = 12,
.max_signal = 12,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "adc",
.min_signal = 13,
.max_signal = 13,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "to_jpeg",
.min_signal = 14,
.max_signal = 14,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "from_jpeg",
.min_signal = 15,
.max_signal = 15,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras0_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras0_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras1_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras1_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras2_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras2_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras3_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras3_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras4_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras4_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ext0_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext0_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext1_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext1_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext2_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext2_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext3_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext3_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext4_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext4_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext5_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext5_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext6_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext6_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext7_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext7_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
},
};
@@ -373,7 +325,8 @@ static struct pl08x_channel_data spear600_dma_info[] = {
struct pl08x_platform_data pl080_plat_data = {
.memcpy_channel = {
.bus_id = "memcpy",
- .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
+ .cctl_memcpy =
+ (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
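
With the per-channel .cctl values removed above, a board's PL08x channel table only describes request-signal routing; transfer widths and burst sizes are now derived from each client's dma_slave_config at prepare time (see pl08x_get_cctl() in the amba-pl08x.c changes below), and only the memcpy channel keeps a fixed value as cctl_memcpy. A trimmed, illustrative entry (hypothetical peripheral name) now looks like this:

#include <linux/amba/pl08x.h>

static struct pl08x_channel_data example_dma_info[] = {
	{
		.bus_id		= "periph0_rx",	/* made-up peripheral */
		.min_signal	= 2,
		.max_signal	= 2,
		.muxval		= 0,
		.periph_buses	= PL08X_AHB1,
	},
};
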
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 5493bd95da5..eb3e4d55534 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -81,8 +81,6 @@ struct omap_mmc_platform_data {
/* Return context loss count due to PM states changing */
int (*get_context_loss_count)(struct device *dev);
- u64 dma_mask;
-
/* Integrating attributes from the omap_hwmod layer */
u8 controller_flags;
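
The dma_mask platform field becomes unnecessary because the converted MMC drivers map their buffers against the DMA engine's own device, which already carries the controller's DMA mask. A rough sketch of the idea, with hypothetical driver-local names (not lifted verbatim from this diff):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/core.h>

/* map an MMC request against the DMA engine's device, not the MMC device */
static int example_map_for_dma(struct dma_chan *chan, struct mmc_data *data)
{
	int nents = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
			       DMA_FROM_DEVICE);

	return nents ? nents : -EINVAL;
}
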
diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h
index 2bc6b54460a..eb6590ded40 100644
--- a/arch/arm/plat-spear/include/plat/pl080.h
+++ b/arch/arm/plat-spear/include/plat/pl080.h
@@ -14,8 +14,8 @@
#ifndef __PLAT_PL080_H
#define __PLAT_PL080_H
-struct pl08x_dma_chan;
-int pl080_get_signal(struct pl08x_dma_chan *ch);
-void pl080_put_signal(struct pl08x_dma_chan *ch);
+struct pl08x_channel_data;
+int pl080_get_signal(const struct pl08x_channel_data *cd);
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal);
#endif /* __PLAT_PL080_H */
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c
index 12cf27f935f..cfa1199d0f4 100644
--- a/arch/arm/plat-spear/pl080.c
+++ b/arch/arm/plat-spear/pl080.c
@@ -27,9 +27,8 @@ struct {
unsigned char val;
} signals[16] = {{0, 0}, };
-int pl080_get_signal(struct pl08x_dma_chan *ch)
+int pl080_get_signal(const struct pl08x_channel_data *cd)
{
- const struct pl08x_channel_data *cd = ch->cd;
unsigned int signal = cd->min_signal, val;
unsigned long flags;
@@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch)
return signal;
}
-void pl080_put_signal(struct pl08x_dma_chan *ch)
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal)
{
- const struct pl08x_channel_data *cd = ch->cd;
unsigned long flags;
spin_lock_irqsave(&lock, flags);
/* if signal is not used */
- if (!signals[cd->min_signal].busy)
+ if (!signals[signal].busy)
BUG();
- signals[cd->min_signal].busy--;
+ signals[signal].busy--;
spin_unlock_irqrestore(&lock, flags);
}
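
The reworked helpers take the channel data directly and deal in explicit signal numbers, so they plug straight into the PL08x platform hooks that the driver now calls from its mux handling (pl08x_request_mux()/pl08x_release_mux() below). A sketch of the wiring, modelled on the SPEAr platform data:

#include <linux/amba/pl08x.h>
#include <plat/pl080.h>

static struct pl08x_platform_data example_pl080_plat_data = {
	/* .slave_channels / .memcpy_channel / bus settings as before ... */
	.get_signal	= pl080_get_signal,
	.put_signal	= pl080_put_signal,
};
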
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d45cf1bcbde..d06ea2950dd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,6 +53,7 @@ config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
depends on ARM_AMBA && EXPERIMENTAL
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Platform has a PL08x DMAC device
which can provide DMA engine support
@@ -269,6 +270,7 @@ config DMA_SA11X0
tristate "SA-11x0 DMA support"
depends on ARCH_SA1100
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Support the DMA engine found on Intel StrongARM SA-1100 and
SA-1110 SoCs. This DMA engine can only be used with on-chip
@@ -284,9 +286,18 @@ config MMP_TDMA
Say Y here if you enabled MMP ADMA, otherwise say N.
+config DMA_OMAP
+ tristate "OMAP DMA support"
+ depends on ARCH_OMAP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
config DMA_ENGINE
bool
+config DMA_VIRTUAL_CHANNELS
+ tristate
+
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 640356add0a..4cf6b128ab9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
@@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 49ecbbb8932..6fbeebb9486 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,10 +86,12 @@
#include <asm/hardware/pl080.h>
#include "dmaengine.h"
+#include "virt-dma.h"
#define DRIVER_NAME "pl08xdmac"
static struct amba_driver pl08x_amba_driver;
+struct pl08x_driver_data;
/**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +121,123 @@ struct pl08x_lli {
};
/**
+ * struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+ dma_addr_t addr;
+ u8 maxwidth;
+ u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
+ */
+struct pl08x_phy_chan {
+ unsigned int id;
+ void __iomem *base;
+ spinlock_t lock;
+ struct pl08x_dma_chan *serving;
+ bool locked;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ size_t len;
+ struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ * mux released.
+ */
+struct pl08x_txd {
+ struct virt_dma_desc vd;
+ struct list_head dsg_list;
+ dma_addr_t llis_bus;
+ struct pl08x_lli *llis_va;
+ /* Default cctl value for LLIs */
+ u32 cctl;
+ /*
+ * Settings to be put into the physical channel when we
+ * trigger this txd. Other registers are in llis_va[0].
+ */
+ u32 ccfg;
+ bool done;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+ PL08X_CHAN_IDLE,
+ PL08X_CHAN_RUNNING,
+ PL08X_CHAN_PAUSED,
+ PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @vc: wrapped virtual channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @at: active transaction on this channel
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @signal: the physical DMA request signal which this channel is using
+ * @mux_use: count of descriptors using this DMA request signal setting
+ */
+struct pl08x_dma_chan {
+ struct virt_dma_chan vc;
+ struct pl08x_phy_chan *phychan;
+ const char *name;
+ const struct pl08x_channel_data *cd;
+ struct dma_slave_config cfg;
+ struct pl08x_txd *at;
+ struct pl08x_driver_data *host;
+ enum pl08x_dma_chan_state state;
+ bool slave;
+ int signal;
+ unsigned mux_use;
+};
+
+/**
* struct pl08x_driver_data - the local state holder for the PL08x
* @slave: slave engine for this instance
* @memcpy: memcpy engine for this instance
@@ -128,7 +247,6 @@ struct pl08x_lli {
* @pd: platform data passed in from the platform/machine
* @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors
- * @pool_ctr: counter of LLIs in the pool
* @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
* fetches
* @mem_buses: set to indicate memory transfers on AHB2.
@@ -143,10 +261,8 @@ struct pl08x_driver_data {
struct pl08x_platform_data *pd;
struct pl08x_phy_chan *phy_chans;
struct dma_pool *pool;
- int pool_ctr;
u8 lli_buses;
u8 mem_buses;
- spinlock_t lock;
};
/*
@@ -162,12 +278,51 @@ struct pl08x_driver_data {
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
- return container_of(chan, struct pl08x_dma_chan, chan);
+ return container_of(chan, struct pl08x_dma_chan, vc.chan);
}
static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
- return container_of(tx, struct pl08x_txd, tx);
+ return container_of(tx, struct pl08x_txd, vd.tx);
+}
+
+/*
+ * Mux handling.
+ *
+ * This gives us the DMA request input to the PL08x primecell which the
+ * peripheral described by the channel data will be routed to, possibly
+ * via a board/SoC specific external MUX. One important point to note
+ * here is that this does not depend on the physical channel.
+ */
+static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
+{
+ const struct pl08x_platform_data *pd = plchan->host->pd;
+ int ret;
+
+ if (plchan->mux_use++ == 0 && pd->get_signal) {
+ ret = pd->get_signal(plchan->cd);
+ if (ret < 0) {
+ plchan->mux_use = 0;
+ return ret;
+ }
+
+ plchan->signal = ret;
+ }
+ return 0;
+}
+
+static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
+{
+ const struct pl08x_platform_data *pd = plchan->host->pd;
+
+ if (plchan->signal >= 0) {
+ WARN_ON(plchan->mux_use == 0);
+
+ if (--plchan->mux_use == 0 && pd->put_signal) {
+ pd->put_signal(plchan->cd, plchan->signal);
+ plchan->signal = -1;
+ }
+ }
}
/*
@@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
* been set when the LLIs were constructed. Poke them into the hardware
* and start the transfer.
*/
-static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
+static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_phy_chan *phychan = plchan->phychan;
- struct pl08x_lli *lli = &txd->llis_va[0];
+ struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_lli *lli;
u32 val;
+ list_del(&txd->vd.node);
+
plchan->at = txd;
/* Wait for channel inactive */
while (pl08x_phy_channel_busy(phychan))
cpu_relax();
+ lli = &txd->llis_va[0];
+
dev_vdbg(&pl08x->adev->dev,
"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
@@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
struct pl08x_phy_chan *ch;
struct pl08x_txd *txd;
- unsigned long flags;
size_t bytes = 0;
- spin_lock_irqsave(&plchan->lock, flags);
ch = plchan->phychan;
txd = plchan->at;
@@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
}
}
- /* Sum up all queued transactions */
- if (!list_empty(&plchan->pend_list)) {
- struct pl08x_txd *txdi;
- list_for_each_entry(txdi, &plchan->pend_list, node) {
- struct pl08x_sg *dsg;
- list_for_each_entry(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
- }
- }
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
return bytes;
}
@@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
if (!ch->locked && !ch->serving) {
ch->serving = virt_chan;
- ch->signal = -1;
spin_unlock_irqrestore(&ch->lock, flags);
break;
}
@@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
return NULL;
}
- pm_runtime_get_sync(&pl08x->adev->dev);
return ch;
}
+/* Mark the physical channel as free. Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *ch)
{
- unsigned long flags;
+ ch->serving = NULL;
+}
+
+/*
+ * Try to allocate a physical channel. When successful, assign it to
+ * this virtual channel, and initiate the next descriptor. The
+ * virtual channel lock must be held at this point.
+ */
+static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_phy_chan *ch;
- spin_lock_irqsave(&ch->lock, flags);
+ ch = pl08x_get_phy_channel(pl08x, plchan);
+ if (!ch) {
+ dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+ plchan->state = PL08X_CHAN_WAITING;
+ return;
+ }
- /* Stop the channel and clear its interrupts */
- pl08x_terminate_phy_chan(pl08x, ch);
+ dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+ ch->id, plchan->name);
- pm_runtime_put(&pl08x->adev->dev);
+ plchan->phychan = ch;
+ plchan->state = PL08X_CHAN_RUNNING;
+ pl08x_start_next_txd(plchan);
+}
- /* Mark it as free */
- ch->serving = NULL;
- spin_unlock_irqrestore(&ch->lock, flags);
+static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
+ struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+
+ dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
+ ch->id, plchan->name);
+
+ /*
+ * We do this without taking the lock; we're really only concerned
+ * about whether this pointer is NULL or not, and we're guaranteed
+ * that this will only be called when it _already_ is non-NULL.
+ */
+ ch->serving = plchan;
+ plchan->phychan = ch;
+ plchan->state = PL08X_CHAN_RUNNING;
+ pl08x_start_next_txd(plchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_dma_chan *p, *next;
+
+ retry:
+ next = NULL;
+
+ /* Find a waiting virtual channel for the next transfer. */
+ list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
+ if (p->state == PL08X_CHAN_WAITING) {
+ next = p;
+ break;
+ }
+
+ if (!next) {
+ list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
+ if (p->state == PL08X_CHAN_WAITING) {
+ next = p;
+ break;
+ }
+ }
+
+ /* Ensure that the physical channel is stopped */
+ pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+
+ if (next) {
+ bool success;
+
+ /*
+ * Eww. We know this isn't going to deadlock
+ * but lockdep probably doesn't.
+ */
+ spin_lock(&next->vc.lock);
+ /* Re-check the state now that we have the lock */
+ success = next->state == PL08X_CHAN_WAITING;
+ if (success)
+ pl08x_phy_reassign_start(plchan->phychan, next);
+ spin_unlock(&next->vc.lock);
+
+ /* If the state changed, try to find another channel */
+ if (!success)
+ goto retry;
+ } else {
+ /* No more jobs, so free up the physical channel */
+ pl08x_put_phy_channel(pl08x, plchan->phychan);
+ }
+
+ plchan->phychan = NULL;
+ plchan->state = PL08X_CHAN_IDLE;
}
/*
@@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return 0;
}
- pl08x->pool_ctr++;
-
bd.txd = txd;
bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
cctl = txd->cctl;
@@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return num_llis;
}
-/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
struct pl08x_sg *dsg, *_dsg;
- /* Free the LLI */
if (txd->llis_va)
dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
- pl08x->pool_ctr--;
-
list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
list_del(&dsg->node);
kfree(dsg);
@@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
kfree(txd);
}
-static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
- struct pl08x_dma_chan *plchan)
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
- struct pl08x_txd *txdi = NULL;
- struct pl08x_txd *next;
-
- if (!list_empty(&plchan->pend_list)) {
- list_for_each_entry_safe(txdi,
- next, &plchan->pend_list, node) {
- list_del(&txdi->node);
- pl08x_free_txd(pl08x, txdi);
+ struct device *dev = txd->vd.tx.chan->device->dev;
+ struct pl08x_sg *dsg;
+
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ else {
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
}
}
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ else
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ }
}
-/*
- * The DMA ENGINE API
- */
-static int pl08x_alloc_chan_resources(struct dma_chan *chan)
+static void pl08x_desc_free(struct virt_dma_desc *vd)
{
- return 0;
-}
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
-static void pl08x_free_chan_resources(struct dma_chan *chan)
-{
+ if (!plchan->slave)
+ pl08x_unmap_buffers(txd);
+
+ if (!txd->done)
+ pl08x_release_mux(plchan);
+
+ pl08x_free_txd(plchan->host, txd);
}
-/*
- * This should be called with the channel plchan->lock held
- */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
+static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
+ struct pl08x_dma_chan *plchan)
{
- struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_phy_chan *ch;
- int ret;
-
- /* Check if we already have a channel */
- if (plchan->phychan) {
- ch = plchan->phychan;
- goto got_channel;
- }
+ LIST_HEAD(head);
+ struct pl08x_txd *txd;
- ch = pl08x_get_phy_channel(pl08x, plchan);
- if (!ch) {
- /* No physical channel available, cope with it */
- dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
- return -EBUSY;
- }
+ vchan_get_all_descriptors(&plchan->vc, &head);
- /*
- * OK we have a physical channel: for memcpy() this is all we
- * need, but for slaves the physical signals may be muxed!
- * Can the platform allow us to use this channel?
- */
- if (plchan->slave && pl08x->pd->get_signal) {
- ret = pl08x->pd->get_signal(plchan);
- if (ret < 0) {
- dev_dbg(&pl08x->adev->dev,
- "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
- ch->id, plchan->name);
- /* Release physical channel & return */
- pl08x_put_phy_channel(pl08x, ch);
- return -EBUSY;
- }
- ch->signal = ret;
+ while (!list_empty(&head)) {
+ txd = list_first_entry(&head, struct pl08x_txd, vd.node);
+ list_del(&txd->vd.node);
+ pl08x_desc_free(&txd->vd);
}
-
- plchan->phychan = ch;
- dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
- ch->id,
- ch->signal,
- plchan->name);
-
-got_channel:
- /* Assign the flow control signal to this channel */
- if (txd->direction == DMA_MEM_TO_DEV)
- txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
- else if (txd->direction == DMA_DEV_TO_MEM)
- txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
- plchan->phychan_hold++;
-
- return 0;
}
-static void release_phy_channel(struct pl08x_dma_chan *plchan)
+/*
+ * The DMA ENGINE API
+ */
+static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
- struct pl08x_driver_data *pl08x = plchan->host;
-
- if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
- pl08x->pd->put_signal(plchan);
- plchan->phychan->signal = -1;
- }
- pl08x_put_phy_channel(pl08x, plchan->phychan);
- plchan->phychan = NULL;
+ return 0;
}
-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
+static void pl08x_free_chan_resources(struct dma_chan *chan)
{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
- struct pl08x_txd *txd = to_pl08x_txd(tx);
- unsigned long flags;
- dma_cookie_t cookie;
-
- spin_lock_irqsave(&plchan->lock, flags);
- cookie = dma_cookie_assign(tx);
-
- /* Put this onto the pending list */
- list_add_tail(&txd->node, &plchan->pend_list);
-
- /*
- * If there was no physical channel available for this memcpy,
- * stack the request up and indicate that the channel is waiting
- * for a free physical channel.
- */
- if (!plchan->slave && !plchan->phychan) {
- /* Do this memcpy whenever there is a channel ready */
- plchan->state = PL08X_CHAN_WAITING;
- plchan->waiting = txd;
- } else {
- plchan->phychan_hold--;
- }
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- return cookie;
+ /* Ensure all queued descriptors are freed */
+ vchan_free_chan_resources(to_virt_chan(chan));
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
enum dma_status ret;
+ size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
return ret;
/*
+ * There's no point calculating the residue if there's
+ * no txstate to store the value.
+ */
+ if (!txstate) {
+ if (plchan->state == PL08X_CHAN_PAUSED)
+ ret = DMA_PAUSED;
+ return ret;
+ }
+
+ spin_lock_irqsave(&plchan->vc.lock, flags);
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret != DMA_SUCCESS) {
+ vd = vchan_find_desc(&plchan->vc, cookie);
+ if (vd) {
+ /* On the issued list, so hasn't been processed yet */
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_sg *dsg;
+
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
+ } else {
+ bytes = pl08x_getbytes_chan(plchan);
+ }
+ }
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+ /*
* This cookie not complete yet
* Get number of bytes left in the active transactions and queue
*/
- dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
+ dma_set_residue(txstate, bytes);
- if (plchan->state == PL08X_CHAN_PAUSED)
- return DMA_PAUSED;
+ if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
+ ret = DMA_PAUSED;
/* Whether waiting or running, we're in progress */
- return DMA_IN_PROGRESS;
+ return ret;
}
/* PrimeCell DMA extension */
@@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst)
return burst_sizes[i].reg;
}
-static int dma_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
+ enum dma_slave_buswidth addr_width, u32 maxburst)
{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- struct pl08x_driver_data *pl08x = plchan->host;
- enum dma_slave_buswidth addr_width;
- u32 width, burst, maxburst;
- u32 cctl = 0;
-
- if (!plchan->slave)
- return -EINVAL;
-
- /* Transfer direction */
- plchan->runtime_direction = config->direction;
- if (config->direction == DMA_MEM_TO_DEV) {
- addr_width = config->dst_addr_width;
- maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_DEV_TO_MEM) {
- addr_width = config->src_addr_width;
- maxburst = config->src_maxburst;
- } else {
- dev_err(&pl08x->adev->dev,
- "bad runtime_config: alien transfer direction\n");
- return -EINVAL;
- }
+ u32 width, burst, cctl = 0;
width = pl08x_width(addr_width);
- if (width == ~0) {
- dev_err(&pl08x->adev->dev,
- "bad runtime_config: alien address width\n");
- return -EINVAL;
- }
+ if (width == ~0)
+ return ~0;
cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
@@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
- plchan->device_fc = config->device_fc;
+ return pl08x_cctl(cctl);
+}
- if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
- plchan->src_addr = config->src_addr;
- plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
- pl08x_select_bus(plchan->cd->periph_buses,
- pl08x->mem_buses);
- } else {
- plchan->dst_addr = config->dst_addr;
- plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
- pl08x_select_bus(pl08x->mem_buses,
- plchan->cd->periph_buses);
- }
+static int dma_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- dev_dbg(&pl08x->adev->dev,
- "configured channel %s (%s) for %s, data width %d, "
- "maxburst %d words, LE, CCTL=0x%08x\n",
- dma_chan_name(chan), plchan->name,
- (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
- addr_width,
- maxburst,
- cctl);
+ if (!plchan->slave)
+ return -EINVAL;
+
+ /* Reject definitely invalid configurations */
+ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ plchan->cfg = *config;
return 0;
}
@@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan)
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
unsigned long flags;
- spin_lock_irqsave(&plchan->lock, flags);
- /* Something is already active, or we're waiting for a channel... */
- if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
- spin_unlock_irqrestore(&plchan->lock, flags);
- return;
- }
-
- /* Take the first element in the queue and execute it */
- if (!list_empty(&plchan->pend_list)) {
- struct pl08x_txd *next;
-
- next = list_first_entry(&plchan->pend_list,
- struct pl08x_txd,
- node);
- list_del(&next->node);
- plchan->state = PL08X_CHAN_RUNNING;
-
- pl08x_start_txd(plchan, next);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
+ if (vchan_issue_pending(&plchan->vc)) {
+ if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
+ pl08x_phy_alloc_and_start(plchan);
}
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-}
-
-static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
-{
- struct pl08x_driver_data *pl08x = plchan->host;
- unsigned long flags;
- int num_llis, ret;
-
- num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
- if (!num_llis) {
- spin_lock_irqsave(&plchan->lock, flags);
- pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&plchan->lock, flags);
-
- /*
- * See if we already have a physical channel allocated,
- * else this is the time to try to get one.
- */
- ret = prep_phy_channel(plchan, txd);
- if (ret) {
- /*
- * No physical channel was available.
- *
- * memcpy transfers can be sorted out at submission time.
- *
- * Slave transfers may have been denied due to platform
- * channel muxing restrictions. Since there is no guarantee
- * that this will ever be resolved, and the signal must be
- * acquired AFTER acquiring the physical channel, we will let
- * them be NACK:ed with -EBUSY here. The drivers can retry
- * the prep() call if they are eager on doing this using DMA.
- */
- if (plchan->slave) {
- pl08x_free_txd_list(pl08x, plchan);
- pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
- return -EBUSY;
- }
- } else
- /*
- * Else we're all set, paused and ready to roll, status
- * will switch to PL08X_CHAN_RUNNING when we call
- * issue_pending(). If there is something running on the
- * channel already we don't change its state.
- */
- if (plchan->state == PL08X_CHAN_IDLE)
- plchan->state = PL08X_CHAN_PAUSED;
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- return 0;
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
}
-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
- unsigned long flags)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (txd) {
- dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
- txd->tx.flags = flags;
- txd->tx.tx_submit = pl08x_tx_submit;
- INIT_LIST_HEAD(&txd->node);
INIT_LIST_HEAD(&txd->dsg_list);
/* Always enable error and terminal interrupts */
@@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
struct pl08x_sg *dsg;
int ret;
- txd = pl08x_get_txd(plchan, flags);
+ txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev,
"%s no memory for descriptor\n", __func__);
@@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
}
list_add_tail(&dsg->node, &txd->dsg_list);
- txd->direction = DMA_NONE;
dsg->src_addr = src;
dsg->dst_addr = dest;
dsg->len = len;
/* Set platform data for m2m */
txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl = pl08x->pd->memcpy_channel.cctl &
+ txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
/* Both to be incremented or the code will break */
@@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
pl08x->mem_buses);
- ret = pl08x_prep_channel_resources(plchan, txd);
- if (ret)
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_free_txd(pl08x, txd);
return NULL;
+ }
- return &txd->tx;
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_txd *txd;
struct pl08x_sg *dsg;
struct scatterlist *sg;
+ enum dma_slave_buswidth addr_width;
dma_addr_t slave_addr;
int ret, tmp;
+ u8 src_buses, dst_buses;
+ u32 maxburst, cctl;
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
__func__, sg_dma_len(sgl), plchan->name);
- txd = pl08x_get_txd(plchan, flags);
+ txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
return NULL;
}
- if (direction != plchan->runtime_direction)
- dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
- "the direction configured for the PrimeCell\n",
- __func__);
-
/*
* Set up addresses, the PrimeCell configured address
* will take precedence since this may configure the
* channel target address dynamically at runtime.
*/
- txd->direction = direction;
-
if (direction == DMA_MEM_TO_DEV) {
- txd->cctl = plchan->dst_cctl;
- slave_addr = plchan->dst_addr;
+ cctl = PL080_CONTROL_SRC_INCR;
+ slave_addr = plchan->cfg.dst_addr;
+ addr_width = plchan->cfg.dst_addr_width;
+ maxburst = plchan->cfg.dst_maxburst;
+ src_buses = pl08x->mem_buses;
+ dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_DEV_TO_MEM) {
- txd->cctl = plchan->src_cctl;
- slave_addr = plchan->src_addr;
+ cctl = PL080_CONTROL_DST_INCR;
+ slave_addr = plchan->cfg.src_addr;
+ addr_width = plchan->cfg.src_addr_width;
+ maxburst = plchan->cfg.src_maxburst;
+ src_buses = plchan->cd->periph_buses;
+ dst_buses = pl08x->mem_buses;
} else {
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev,
@@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
return NULL;
}
- if (plchan->device_fc)
+ cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
+ if (cctl == ~0) {
+ pl08x_free_txd(pl08x, txd);
+ dev_err(&pl08x->adev->dev,
+ "DMA slave configuration botched?\n");
+ return NULL;
+ }
+
+ txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
+
+ if (plchan->cfg.device_fc)
tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
@@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ ret = pl08x_request_mux(plchan);
+ if (ret < 0) {
+ pl08x_free_txd(pl08x, txd);
+ dev_dbg(&pl08x->adev->dev,
+ "unable to mux for transfer on %s due to platform restrictions\n",
+ plchan->name);
+ return NULL;
+ }
+
+ dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
+ plchan->signal, plchan->name);
+
+ /* Assign the flow control signal to this channel */
+ if (direction == DMA_MEM_TO_DEV)
+ txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else
+ txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
for_each_sg(sgl, sg, sg_len, tmp) {
dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
if (!dsg) {
+ pl08x_release_mux(plchan);
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
__func__);
@@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
}
}
- ret = pl08x_prep_channel_resources(plchan, txd);
- if (ret)
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
return NULL;
+ }
- return &txd->tx;
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* Anything succeeds on channels with no physical allocation and
* no queued transfers.
*/
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
if (!plchan->phychan && !plchan->at) {
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return 0;
}
@@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
plchan->state = PL08X_CHAN_IDLE;
if (plchan->phychan) {
- pl08x_terminate_phy_chan(pl08x, plchan->phychan);
-
/*
* Mark physical channel as free and free any slave
* signal
*/
- release_phy_channel(plchan);
- plchan->phychan_hold = 0;
+ pl08x_phy_free(plchan);
}
/* Dequeue jobs and free LLIs */
if (plchan->at) {
- pl08x_free_txd(pl08x, plchan->at);
+ pl08x_desc_free(&plchan->at->vd);
plchan->at = NULL;
}
/* Dequeue jobs not yet fired as well */
@@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
break;
}
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return ret;
}
@@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
- struct device *dev = txd->tx.chan->device->dev;
- struct pl08x_sg *dsg;
-
- if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- else {
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- }
- }
- if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- else
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- }
-}
-
-static void pl08x_tasklet(unsigned long data)
-{
- struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
- struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_txd *txd;
- unsigned long flags;
-
- spin_lock_irqsave(&plchan->lock, flags);
-
- txd = plchan->at;
- plchan->at = NULL;
-
- if (txd) {
- /* Update last completed */
- dma_cookie_complete(&txd->tx);
- }
-
- /* If a new descriptor is queued, set it up plchan->at is NULL here */
- if (!list_empty(&plchan->pend_list)) {
- struct pl08x_txd *next;
-
- next = list_first_entry(&plchan->pend_list,
- struct pl08x_txd,
- node);
- list_del(&next->node);
-
- pl08x_start_txd(plchan, next);
- } else if (plchan->phychan_hold) {
- /*
- * This channel is still in use - we have a new txd being
- * prepared and will soon be queued. Don't give up the
- * physical channel.
- */
- } else {
- struct pl08x_dma_chan *waiting = NULL;
-
- /*
- * No more jobs, so free up the physical channel
- * Free any allocated signal on slave transfers too
- */
- release_phy_channel(plchan);
- plchan->state = PL08X_CHAN_IDLE;
-
- /*
- * And NOW before anyone else can grab that free:d up
- * physical channel, see if there is some memcpy pending
- * that seriously needs to start because of being stacked
- * up while we were choking the physical channels with data.
- */
- list_for_each_entry(waiting, &pl08x->memcpy.channels,
- chan.device_node) {
- if (waiting->state == PL08X_CHAN_WAITING &&
- waiting->waiting != NULL) {
- int ret;
-
- /* This should REALLY not fail now */
- ret = prep_phy_channel(waiting,
- waiting->waiting);
- BUG_ON(ret);
- waiting->phychan_hold--;
- waiting->state = PL08X_CHAN_RUNNING;
- waiting->waiting = NULL;
- pl08x_issue_pending(&waiting->chan);
- break;
- }
- }
- }
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- if (txd) {
- dma_async_tx_callback callback = txd->tx.callback;
- void *callback_param = txd->tx.callback_param;
-
- /* Don't try to unmap buffers on slave channels */
- if (!plchan->slave)
- pl08x_unmap_buffers(txd);
-
- /* Free the descriptor */
- spin_lock_irqsave(&plchan->lock, flags);
- pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- /* Callback to signal completion */
- if (callback)
- callback(callback_param);
- }
-}
-
static irqreturn_t pl08x_irq(int irq, void *dev)
{
struct pl08x_driver_data *pl08x = dev;
@@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
/* Locate physical channel */
struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
struct pl08x_dma_chan *plchan = phychan->serving;
+ struct pl08x_txd *tx;
if (!plchan) {
dev_err(&pl08x->adev->dev,
@@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
continue;
}
- /* Schedule tasklet on this channel */
- tasklet_schedule(&plchan->tasklet);
+ spin_lock(&plchan->vc.lock);
+ tx = plchan->at;
+ if (tx) {
+ plchan->at = NULL;
+ /*
+ * This descriptor is done, release its mux
+ * reservation.
+ */
+ pl08x_release_mux(plchan);
+ tx->done = true;
+ vchan_cookie_complete(&tx->vd);
+
+ /*
+ * And start the next descriptor (if any),
+ * otherwise free this channel.
+ */
+ if (vchan_next_desc(&plchan->vc))
+ pl08x_start_next_txd(plchan);
+ else
+ pl08x_phy_free(plchan);
+ }
+ spin_unlock(&plchan->vc.lock);
+
mask |= (1 << i);
}
}
@@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
- u32 cctl = pl08x_cctl(chan->cd->cctl);
-
chan->slave = true;
chan->name = chan->cd->bus_id;
- chan->src_addr = chan->cd->addr;
- chan->dst_addr = chan->cd->addr;
- chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
- pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
- chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
- pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+ chan->cfg.src_addr = chan->cd->addr;
+ chan->cfg.dst_addr = chan->cd->addr;
}
/*
@@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
chan->host = pl08x;
chan->state = PL08X_CHAN_IDLE;
+ chan->signal = -1;
if (slave) {
chan->cd = &pl08x->pd->slave_channels[i];
@@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
return -ENOMEM;
}
}
- if (chan->cd->circular_buffer) {
- dev_err(&pl08x->adev->dev,
- "channel %s: circular buffers not supported\n",
- chan->name);
- kfree(chan);
- continue;
- }
dev_dbg(&pl08x->adev->dev,
"initialize virtual channel \"%s\"\n",
chan->name);
- chan->chan.device = dmadev;
- dma_cookie_init(&chan->chan);
-
- spin_lock_init(&chan->lock);
- INIT_LIST_HEAD(&chan->pend_list);
- tasklet_init(&chan->tasklet, pl08x_tasklet,
- (unsigned long) chan);
-
- list_add_tail(&chan->chan.device_node, &dmadev->channels);
+ chan->vc.desc_free = pl08x_desc_free;
+ vchan_init(&chan->vc, dmadev);
}
dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
i, slave ? "slave" : "memcpy");
@@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev)
struct pl08x_dma_chan *next;
list_for_each_entry_safe(chan,
- next, &dmadev->channels, chan.device_node) {
- list_del(&chan->chan.device_node);
+ next, &dmadev->channels, vc.chan.device_node) {
+ list_del(&chan->vc.chan.device_node);
kfree(chan);
}
}
@@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "\nPL08x virtual memcpy channels:\n");
seq_printf(s, "CHANNEL:\tSTATE:\n");
seq_printf(s, "--------\t------\n");
- list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
+ list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
seq_printf(s, "%s\t\t%s\n", chan->name,
pl08x_state_str(chan->state));
}
@@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "\nPL08x virtual slave channels:\n");
seq_printf(s, "CHANNEL:\tSTATE:\n");
seq_printf(s, "--------\t------\n");
- list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
+ list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
seq_printf(s, "%s\t\t%s\n", chan->name,
pl08x_state_str(chan->state));
}
@@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_pl08x;
}
- pm_runtime_set_active(&adev->dev);
- pm_runtime_enable(&adev->dev);
-
/* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev;
@@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_lli_pool;
}
- spin_lock_init(&pl08x->lock);
-
pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
if (!pl08x->base) {
ret = -ENOMEM;
@@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
spin_lock_init(&ch->lock);
- ch->signal = -1;
/*
* Nomadik variants can have channels that are locked
@@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
amba_part(adev), amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]);
- pm_runtime_put(&adev->dev);
return 0;
out_no_slave_reg:
@@ -2026,9 +2034,6 @@ out_no_ioremap:
dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
- pm_runtime_put(&adev->dev);
- pm_runtime_disable(&adev->dev);
-
kfree(pl08x);
out_no_pl08x:
amba_release_regions(adev);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
new file mode 100644
index 00000000000..ae056182613
--- /dev/null
+++ b/drivers/dma/omap-dma.c
@@ -0,0 +1,669 @@
+/*
+ * OMAP DMAengine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+#include <plat/dma.h>
+
+struct omap_dmadev {
+ struct dma_device ddev;
+ spinlock_t lock;
+ struct tasklet_struct task;
+ struct list_head pending;
+};
+
+struct omap_chan {
+ struct virt_dma_chan vc;
+ struct list_head node;
+
+ struct dma_slave_config cfg;
+ unsigned dma_sig;
+ bool cyclic;
+
+ int dma_ch;
+ struct omap_desc *desc;
+ unsigned sgidx;
+};
+
+struct omap_sg {
+ dma_addr_t addr;
+ uint32_t en; /* number of elements (24-bit) */
+ uint32_t fn; /* number of frames (16-bit) */
+};
+
+struct omap_desc {
+ struct virt_dma_desc vd;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+
+ int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
+ uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
+ uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
+ uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
+ uint8_t periph_port; /* Peripheral port */
+
+ unsigned sglen;
+ struct omap_sg sg[0];
+};
+
+static const unsigned es_bytes[] = {
+ [OMAP_DMA_DATA_TYPE_S8] = 1,
+ [OMAP_DMA_DATA_TYPE_S16] = 2,
+ [OMAP_DMA_DATA_TYPE_S32] = 4,
+};
+
+static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
+{
+ return container_of(d, struct omap_dmadev, ddev);
+}
+
+static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct omap_chan, vc.chan);
+}
+
+static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct omap_desc, vd.tx);
+}
+
+static void omap_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(container_of(vd, struct omap_desc, vd));
+}
+
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
+ unsigned idx)
+{
+ struct omap_sg *sg = d->sg + idx;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+ OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+ else
+ omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+ OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+
+ omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
+ d->sync_mode, c->dma_sig, d->sync_type);
+
+ omap_start_dma(c->dma_ch);
+}
+
+static void omap_dma_start_desc(struct omap_chan *c)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+ struct omap_desc *d;
+
+ if (!vd) {
+ c->desc = NULL;
+ return;
+ }
+
+ list_del(&vd->node);
+
+ c->desc = d = to_omap_dma_desc(&vd->tx);
+ c->sgidx = 0;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ omap_set_dma_src_params(c->dma_ch, d->periph_port,
+ OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+ else
+ omap_set_dma_dest_params(c->dma_ch, d->periph_port,
+ OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+
+ omap_dma_start_sg(c, d, 0);
+}
+
+static void omap_dma_callback(int ch, u16 status, void *data)
+{
+ struct omap_chan *c = data;
+ struct omap_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ d = c->desc;
+ if (d) {
+ if (!c->cyclic) {
+ if (++c->sgidx < d->sglen) {
+ omap_dma_start_sg(c, d, c->sgidx);
+ } else {
+ omap_dma_start_desc(c);
+ vchan_cookie_complete(&d->vd);
+ }
+ } else {
+ vchan_cyclic_callback(&d->vd);
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+/*
+ * This tasklet schedules all pending channels. We could be more
+ * clever here by postponing allocation of the real DMA channels to
+ * this point, and freeing them when our virtual channel becomes idle.
+ *
+ * We would then need to handle the case where all physical channels are in use.
+ */
+static void omap_dma_sched(unsigned long data)
+{
+ struct omap_dmadev *d = (struct omap_dmadev *)data;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&d->lock);
+ list_splice_tail_init(&d->pending, &head);
+ spin_unlock_irq(&d->lock);
+
+ while (!list_empty(&head)) {
+ struct omap_chan *c = list_first_entry(&head,
+ struct omap_chan, node);
+
+ spin_lock_irq(&c->vc.lock);
+ list_del_init(&c->node);
+ omap_dma_start_desc(c);
+ spin_unlock_irq(&c->vc.lock);
+ }
+}
+
+static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+
+ return omap_request_dma(c->dma_sig, "DMA engine",
+ omap_dma_callback, c, &c->dma_ch);
+}
+
+static void omap_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ vchan_free_chan_resources(&c->vc);
+ omap_free_dma(c->dma_ch);
+
+ dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+}
+
+static size_t omap_dma_sg_size(struct omap_sg *sg)
+{
+ return sg->en * sg->fn;
+}
+
+static size_t omap_dma_desc_size(struct omap_desc *d)
+{
+ unsigned i;
+ size_t size;
+
+ for (size = i = 0; i < d->sglen; i++)
+ size += omap_dma_sg_size(&d->sg[i]);
+
+ return size * es_bytes[d->es];
+}
+
+static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
+{
+ unsigned i;
+ size_t size, es_size = es_bytes[d->es];
+
+ for (size = i = 0; i < d->sglen; i++) {
+ size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
+
+ if (size)
+ size += this_size;
+ else if (addr >= d->sg[i].addr &&
+ addr < d->sg[i].addr + this_size)
+ size += d->sg[i].addr + this_size - addr;
+ }
+ return size;
+}
+
+static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
+ } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+ struct omap_desc *d = c->desc;
+ dma_addr_t pos;
+
+ if (d->dir == DMA_MEM_TO_DEV)
+ pos = omap_get_dma_src_pos(c->dma_ch);
+ else if (d->dir == DMA_DEV_TO_MEM)
+ pos = omap_get_dma_dst_pos(c->dma_ch);
+ else
+ pos = 0;
+
+ txstate->residue = omap_dma_desc_size_pos(d, pos);
+ } else {
+ txstate->residue = 0;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ return ret;
+}
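
For context, a minimal client-side sketch (illustrative names, not part of this patch) of how the residue reported above is consumed; it assumes a channel and cookie obtained from an earlier dma_request_channel()/dmaengine_submit() pair.

#include <linux/dmaengine.h>

/* Illustrative sketch: ask how much of a submitted transfer is left. */
static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_SUCCESS)
                return 0;       /* fully transferred */

        /*
         * For an in-flight descriptor the driver derives the residue from
         * the current hardware position; for a still-queued descriptor it
         * reports the descriptor's full size.
         */
        return state.residue;
}
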
+
+static void omap_dma_issue_pending(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
+ struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+ spin_lock(&d->lock);
+ if (list_empty(&c->node))
+ list_add_tail(&c->node, &d->pending);
+ spin_unlock(&d->lock);
+ tasklet_schedule(&d->task);
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
+ enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct scatterlist *sgent;
+ struct omap_desc *d;
+ dma_addr_t dev_addr;
+ unsigned i, j = 0, es, en, frame_bytes, sync_type;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = c->cfg.src_addr;
+ dev_width = c->cfg.src_addr_width;
+ burst = c->cfg.src_maxburst;
+ sync_type = OMAP_DMA_SRC_SYNC;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = c->cfg.dst_addr;
+ dev_width = c->cfg.dst_addr_width;
+ burst = c->cfg.dst_maxburst;
+ sync_type = OMAP_DMA_DST_SYNC;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ es = OMAP_DMA_DATA_TYPE_S8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S32;
+ break;
+ default: /* not reached */
+ return NULL;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->dev_addr = dev_addr;
+ d->es = es;
+ d->sync_mode = OMAP_DMA_SYNC_FRAME;
+ d->sync_type = sync_type;
+ d->periph_port = OMAP_DMA_PORT_TIPB;
+
+ /*
+ * Build our scatterlist entries: each contains the address,
+ * the number of elements (EN) in each frame, and the number of
+ * frames (FN). Number of bytes for this entry = ES * EN * FN.
+ *
+ * Burst size translates to number of elements with frame sync.
+ * Note: DMA engine defines burst to be the number of dev-width
+ * transfers.
+ */
+ en = burst;
+ frame_bytes = es_bytes[es] * en;
+ for_each_sg(sgl, sgent, sglen, i) {
+ d->sg[j].addr = sg_dma_address(sgent);
+ d->sg[j].en = en;
+ d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
+ j++;
+ }
+
+ d->sglen = j;
+
+ return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
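
As a worked example of the element/frame arithmetic above (numbers are illustrative): with a 32-bit slave register (es = OMAP_DMA_DATA_TYPE_S32, so es_bytes = 4) and a maxburst of 16, each frame carries en = 16 elements, i.e. frame_bytes = 4 * 16 = 64; a 4096-byte scatterlist segment therefore becomes fn = 4096 / 64 = 64 frames, and the total for that entry is ES * EN * FN = 4 * 16 * 64 = 4096 bytes. The division assumes sg_dma_len() is a multiple of frame_bytes.
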
+
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir, void *context)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct omap_desc *d;
+ dma_addr_t dev_addr;
+ unsigned es, sync_type;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = c->cfg.src_addr;
+ dev_width = c->cfg.src_addr_width;
+ burst = c->cfg.src_maxburst;
+ sync_type = OMAP_DMA_SRC_SYNC;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = c->cfg.dst_addr;
+ dev_width = c->cfg.dst_addr_width;
+ burst = c->cfg.dst_maxburst;
+ sync_type = OMAP_DMA_DST_SYNC;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ es = OMAP_DMA_DATA_TYPE_S8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S32;
+ break;
+ default: /* not reached */
+ return NULL;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->dev_addr = dev_addr;
+ d->fi = burst;
+ d->es = es;
+ d->sync_mode = OMAP_DMA_SYNC_PACKET;
+ d->sync_type = sync_type;
+ d->periph_port = OMAP_DMA_PORT_MPUI;
+ d->sg[0].addr = buf_addr;
+ d->sg[0].en = period_len / es_bytes[es];
+ d->sg[0].fn = buf_len / period_len;
+ d->sglen = 1;
+
+ if (!c->cyclic) {
+ c->cyclic = true;
+ omap_dma_link_lch(c->dma_ch, c->dma_ch);
+ omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+ omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
+ }
+
+ if (!cpu_class_is_omap1()) {
+ omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+ }
+
+ return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+}
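
A worked example for the cyclic case (illustrative, audio-style numbers): with 32-bit samples (es_bytes = 4), a 64 KiB ring buffer and an 8 KiB period, the single sg entry gets en = 8192 / 4 = 2048 elements per frame and fn = 65536 / 8192 = 8 frames, so one frame corresponds to one period and the frame interrupt enabled in this prep routine drives the periodic callback via vchan_cyclic_callback().
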
+
+static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
+{
+ if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ memcpy(&c->cfg, cfg, sizeof(c->cfg));
+
+ return 0;
+}
+
+static int omap_dma_terminate_all(struct omap_chan *c)
+{
+ struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /*
+ * Stop DMA activity: we assume the callback will not be called
+	 * after omap_stop_dma() returns (even if it does, it will see that
+	 * c->desc is NULL and exit).
+ */
+ if (c->desc) {
+ c->desc = NULL;
+ omap_stop_dma(c->dma_ch);
+ }
+
+ if (c->cyclic) {
+ c->cyclic = false;
+ omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
+ }
+
+ vchan_get_all_descriptors(&c->vc, &head);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
+ return 0;
+}
+
+static int omap_dma_pause(struct omap_chan *c)
+{
+ /* FIXME: not supported by platform private API */
+ return -EINVAL;
+}
+
+static int omap_dma_resume(struct omap_chan *c)
+{
+ /* FIXME: not supported by platform private API */
+ return -EINVAL;
+}
+
+static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ int ret;
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ ret = omap_dma_terminate_all(c);
+ break;
+
+ case DMA_PAUSE:
+ ret = omap_dma_pause(c);
+ break;
+
+ case DMA_RESUME:
+ ret = omap_dma_resume(c);
+ break;
+
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
+{
+ struct omap_chan *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ c->dma_sig = dma_sig;
+ c->vc.desc_free = omap_dma_desc_free;
+ vchan_init(&c->vc, &od->ddev);
+ INIT_LIST_HEAD(&c->node);
+
+ od->ddev.chancnt++;
+
+ return 0;
+}
+
+static void omap_dma_free(struct omap_dmadev *od)
+{
+ tasklet_kill(&od->task);
+ while (!list_empty(&od->ddev.channels)) {
+ struct omap_chan *c = list_first_entry(&od->ddev.channels,
+ struct omap_chan, vc.chan.device_node);
+
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ kfree(c);
+ }
+ kfree(od);
+}
+
+static int omap_dma_probe(struct platform_device *pdev)
+{
+ struct omap_dmadev *od;
+ int rc, i;
+
+ od = kzalloc(sizeof(*od), GFP_KERNEL);
+ if (!od)
+ return -ENOMEM;
+
+ dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+ od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
+ od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
+ od->ddev.device_tx_status = omap_dma_tx_status;
+ od->ddev.device_issue_pending = omap_dma_issue_pending;
+ od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
+ od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
+ od->ddev.device_control = omap_dma_control;
+ od->ddev.dev = &pdev->dev;
+ INIT_LIST_HEAD(&od->ddev.channels);
+ INIT_LIST_HEAD(&od->pending);
+ spin_lock_init(&od->lock);
+
+ tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
+
+ for (i = 0; i < 127; i++) {
+ rc = omap_dma_chan_init(od, i);
+ if (rc) {
+ omap_dma_free(od);
+ return rc;
+ }
+ }
+
+ rc = dma_async_device_register(&od->ddev);
+ if (rc) {
+ pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
+ rc);
+ omap_dma_free(od);
+ } else {
+ platform_set_drvdata(pdev, od);
+ }
+
+ dev_info(&pdev->dev, "OMAP DMA engine driver\n");
+
+ return rc;
+}
+
+static int omap_dma_remove(struct platform_device *pdev)
+{
+ struct omap_dmadev *od = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&od->ddev);
+ omap_dma_free(od);
+
+ return 0;
+}
+
+static struct platform_driver omap_dma_driver = {
+ .probe = omap_dma_probe,
+ .remove = omap_dma_remove,
+ .driver = {
+ .name = "omap-dma-engine",
+ .owner = THIS_MODULE,
+ },
+};
+
+bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &omap_dma_driver.driver) {
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ unsigned req = *(unsigned *)param;
+
+ return req == c->dma_sig;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
+
+static struct platform_device *pdev;
+
+static const struct platform_device_info omap_dma_dev_info = {
+ .name = "omap-dma-engine",
+ .id = -1,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int omap_dma_init(void)
+{
+ int rc = platform_driver_register(&omap_dma_driver);
+
+ if (rc == 0) {
+ pdev = platform_device_register_full(&omap_dma_dev_info);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&omap_dma_driver);
+ rc = PTR_ERR(pdev);
+ }
+ }
+ return rc;
+}
+subsys_initcall(omap_dma_init);
+
+static void __exit omap_dma_exit(void)
+{
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&omap_dma_driver);
+}
+module_exit(omap_dma_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ec78ccef913..f5a73606217 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -21,6 +21,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include "virt-dma.h"
+
#define NR_PHY_CHAN 6
#define DMA_ALIGN 3
#define DMA_MAX_SIZE 0x1fff
@@ -72,12 +74,13 @@ struct sa11x0_dma_sg {
};
struct sa11x0_dma_desc {
- struct dma_async_tx_descriptor tx;
+ struct virt_dma_desc vd;
+
u32 ddar;
size_t size;
+ unsigned period;
+ bool cyclic;
- /* maybe protected by c->lock */
- struct list_head node;
unsigned sglen;
struct sa11x0_dma_sg sg[0];
};
@@ -85,15 +88,11 @@ struct sa11x0_dma_desc {
struct sa11x0_dma_phy;
struct sa11x0_dma_chan {
- struct dma_chan chan;
- spinlock_t lock;
- dma_cookie_t lc;
+ struct virt_dma_chan vc;
- /* protected by c->lock */
+ /* protected by c->vc.lock */
struct sa11x0_dma_phy *phy;
enum dma_status status;
- struct list_head desc_submitted;
- struct list_head desc_issued;
/* protected by d->lock */
struct list_head node;
@@ -109,7 +108,7 @@ struct sa11x0_dma_phy {
struct sa11x0_dma_chan *vchan;
- /* Protected by c->lock */
+ /* Protected by c->vc.lock */
unsigned sg_load;
struct sa11x0_dma_desc *txd_load;
unsigned sg_done;
@@ -127,13 +126,12 @@ struct sa11x0_dma_dev {
spinlock_t lock;
struct tasklet_struct task;
struct list_head chan_pending;
- struct list_head desc_complete;
struct sa11x0_dma_phy phy[NR_PHY_CHAN];
};
static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
- return container_of(chan, struct sa11x0_dma_chan, chan);
+ return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}
static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
@@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
return container_of(dmadev, struct sa11x0_dma_dev, slave);
}
-static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
+static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
- return container_of(tx, struct sa11x0_dma_desc, tx);
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+ return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}
-static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
+static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
- if (list_empty(&c->desc_issued))
- return NULL;
-
- return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
+ kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}
static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
- list_del(&txd->node);
+ list_del(&txd->vd.node);
p->txd_load = txd;
p->sg_load = 0;
dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
- p->num, txd, txd->tx.cookie, txd->ddar);
+ p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
@@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
return;
if (p->sg_load == txd->sglen) {
- struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
+ if (!txd->cyclic) {
+ struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
- /*
- * We have reached the end of the current descriptor.
- * Peek at the next descriptor, and if compatible with
- * the current, start processing it.
- */
- if (txn && txn->ddar == txd->ddar) {
- txd = txn;
- sa11x0_dma_start_desc(p, txn);
+ /*
+ * We have reached the end of the current descriptor.
+ * Peek at the next descriptor, and if compatible with
+ * the current, start processing it.
+ */
+ if (txn && txn->ddar == txd->ddar) {
+ txd = txn;
+ sa11x0_dma_start_desc(p, txn);
+ } else {
+ p->txd_load = NULL;
+ return;
+ }
} else {
- p->txd_load = NULL;
- return;
+ /* Cyclic: reset back to beginning */
+ p->sg_load = 0;
}
}
@@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
struct sa11x0_dma_desc *txd = p->txd_done;
if (++p->sg_done == txd->sglen) {
- struct sa11x0_dma_dev *d = p->dev;
-
- dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
- p->num, p->txd_done, p->txd_done->tx.cookie);
-
- c->lc = txd->tx.cookie;
+ if (!txd->cyclic) {
+ vchan_cookie_complete(&txd->vd);
- spin_lock(&d->lock);
- list_add_tail(&txd->node, &d->desc_complete);
- spin_unlock(&d->lock);
+ p->sg_done = 0;
+ p->txd_done = p->txd_load;
- p->sg_done = 0;
- p->txd_done = p->txd_load;
+ if (!p->txd_done)
+ tasklet_schedule(&p->dev->task);
+ } else {
+ if ((p->sg_done % txd->period) == 0)
+ vchan_cyclic_callback(&txd->vd);
- tasklet_schedule(&d->task);
+ /* Cyclic: reset back to beginning */
+ p->sg_done = 0;
+ }
}
sa11x0_dma_start_sg(p, c);
@@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
if (c) {
unsigned long flags;
- spin_lock_irqsave(&c->lock, flags);
+ spin_lock_irqsave(&c->vc.lock, flags);
/*
* Now that we're holding the lock, check that the vchan
* really is associated with this pchan before touching the
@@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
if (dcsr & DCSR_DONEB)
sa11x0_dma_complete(p, c);
}
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
}
return IRQ_HANDLED;
@@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg)
struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
struct sa11x0_dma_phy *p;
struct sa11x0_dma_chan *c;
- struct sa11x0_dma_desc *txd, *txn;
- LIST_HEAD(head);
unsigned pch, pch_alloc = 0;
dev_dbg(d->slave.dev, "tasklet enter\n");
- /* Get the completed tx descriptors */
- spin_lock_irq(&d->lock);
- list_splice_init(&d->desc_complete, &head);
- spin_unlock_irq(&d->lock);
-
- list_for_each_entry(txd, &head, node) {
- c = to_sa11x0_dma_chan(txd->tx.chan);
-
- dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
- c, txd, txd->tx.cookie);
-
- spin_lock_irq(&c->lock);
+ list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&c->vc.lock);
p = c->phy;
- if (p) {
- if (!p->txd_done)
- sa11x0_dma_start_txd(c);
+ if (p && !p->txd_done) {
+ sa11x0_dma_start_txd(c);
if (!p->txd_done) {
/* No current txd associated with this channel */
dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
@@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
p->vchan = NULL;
}
}
- spin_unlock_irq(&c->lock);
+ spin_unlock_irq(&c->vc.lock);
}
spin_lock_irq(&d->lock);
@@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
/* Mark this channel allocated */
p->vchan = c;
- dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
}
}
spin_unlock_irq(&d->lock);
@@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg)
p = &d->phy[pch];
c = p->vchan;
- spin_lock_irq(&c->lock);
+ spin_lock_irq(&c->vc.lock);
c->phy = p;
sa11x0_dma_start_txd(c);
- spin_unlock_irq(&c->lock);
+ spin_unlock_irq(&c->vc.lock);
}
}
- /* Now free the completed tx descriptor, and call their callbacks */
- list_for_each_entry_safe(txd, txn, &head, node) {
- dma_async_tx_callback callback = txd->tx.callback;
- void *callback_param = txd->tx.callback_param;
-
- dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
- txd, txd->tx.cookie);
-
- kfree(txd);
-
- if (callback)
- callback(callback_param);
- }
-
dev_dbg(d->slave.dev, "tasklet exit\n");
}
-static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
-{
- struct sa11x0_dma_desc *txd, *txn;
-
- list_for_each_entry_safe(txd, txn, head, node) {
- dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
- kfree(txd);
- }
-}
-
static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
return 0;
@@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
unsigned long flags;
- LIST_HEAD(head);
- spin_lock_irqsave(&c->lock, flags);
- spin_lock(&d->lock);
+ spin_lock_irqsave(&d->lock, flags);
list_del_init(&c->node);
- spin_unlock(&d->lock);
-
- list_splice_tail_init(&c->desc_submitted, &head);
- list_splice_tail_init(&c->desc_issued, &head);
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&d->lock, flags);
- sa11x0_dma_desc_free(d, &head);
+ vchan_free_chan_resources(&c->vc);
}
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
@@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- struct sa11x0_dma_desc *txd;
- dma_cookie_t last_used, last_complete;
+ struct virt_dma_desc *vd;
unsigned long flags;
enum dma_status ret;
- size_t bytes = 0;
-
- last_used = c->chan.cookie;
- last_complete = c->lc;
- ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (ret == DMA_SUCCESS) {
- dma_set_tx_state(state, last_complete, last_used, 0);
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
+ if (ret == DMA_SUCCESS)
return ret;
- }
- spin_lock_irqsave(&c->lock, flags);
+ if (!state)
+ return c->status;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
p = c->phy;
- ret = c->status;
- if (p) {
- dma_addr_t addr = sa11x0_dma_pos(p);
- dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
+ } else if (!p) {
+ state->residue = 0;
+ } else {
+ struct sa11x0_dma_desc *txd;
+ size_t bytes = 0;
- txd = p->txd_done;
+ if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
+ txd = p->txd_done;
+ else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
+ txd = p->txd_load;
+ else
+ txd = NULL;
+
+ ret = c->status;
if (txd) {
+ dma_addr_t addr = sa11x0_dma_pos(p);
unsigned i;
+ dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+
for (i = 0; i < txd->sglen; i++) {
dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
i, txd->sg[i].addr, txd->sg[i].len);
@@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
bytes += txd->sg[i].len;
}
}
- if (txd != p->txd_load && p->txd_load)
- bytes += p->txd_load->size;
- }
- list_for_each_entry(txd, &c->desc_issued, node) {
- bytes += txd->size;
+ state->residue = bytes;
}
- spin_unlock_irqrestore(&c->lock, flags);
-
- dma_set_tx_state(state, last_complete, last_used, bytes);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
- dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
+ dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
return ret;
}
@@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan)
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
unsigned long flags;
- spin_lock_irqsave(&c->lock, flags);
- list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
- if (!list_empty(&c->desc_issued)) {
- spin_lock(&d->lock);
- if (!c->phy && list_empty(&c->node)) {
- list_add_tail(&c->node, &d->chan_pending);
- tasklet_schedule(&d->task);
- dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (vchan_issue_pending(&c->vc)) {
+ if (!c->phy) {
+ spin_lock(&d->lock);
+ if (list_empty(&c->node)) {
+ list_add_tail(&c->node, &d->chan_pending);
+ tasklet_schedule(&d->task);
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+ }
+ spin_unlock(&d->lock);
}
- spin_unlock(&d->lock);
} else
- dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
- spin_unlock_irqrestore(&c->lock, flags);
-}
-
-static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
- struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
- unsigned long flags;
-
- spin_lock_irqsave(&c->lock, flags);
- c->chan.cookie += 1;
- if (c->chan.cookie < 0)
- c->chan.cookie = 1;
- txd->tx.cookie = c->chan.cookie;
-
- list_add_tail(&txd->node, &c->desc_submitted);
- spin_unlock_irqrestore(&c->lock, flags);
-
- dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
- c, txd, txd->tx.cookie);
-
- return txd->tx.cookie;
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
@@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
/* SA11x0 channels can only operate in their native direction */
if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
- c, c->ddar, dir);
+ &c->vc, c->ddar, dir);
return NULL;
}
@@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
if (addr & DMA_ALIGN) {
dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
- c, addr);
+ &c->vc, addr);
return NULL;
}
}
txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
if (!txd) {
- dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
return NULL;
}
@@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
} while (len);
}
- dma_async_tx_descriptor_init(&txd->tx, &c->chan);
- txd->tx.flags = flags;
- txd->tx.tx_submit = sa11x0_dma_tx_submit;
txd->ddar = c->ddar;
txd->size = size;
txd->sglen = j;
dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
- c, txd, txd->size, txd->sglen);
+ &c->vc, &txd->vd, txd->size, txd->sglen);
- return &txd->tx;
+ return vchan_tx_prep(&c->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
+ enum dma_transfer_direction dir, void *context)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ struct sa11x0_dma_desc *txd;
+ unsigned i, j, k, sglen, sgperiod;
+
+ /* SA11x0 channels can only operate in their native direction */
+ if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
+ dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
+ &c->vc, c->ddar, dir);
+ return NULL;
+ }
+
+ sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
+ sglen = size * sgperiod / period;
+
+ /* Do not allow zero-sized txds */
+ if (sglen == 0)
+ return NULL;
+
+ txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
+ if (!txd) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
+ return NULL;
+ }
+
+ for (i = k = 0; i < size / period; i++) {
+ size_t tlen, len = period;
+
+ for (j = 0; j < sgperiod; j++, k++) {
+ tlen = len;
+
+ if (tlen > DMA_MAX_SIZE) {
+ unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
+ tlen = (tlen / mult) & ~DMA_ALIGN;
+ }
+
+ txd->sg[k].addr = addr;
+ txd->sg[k].len = tlen;
+ addr += tlen;
+ len -= tlen;
+ }
+
+ WARN_ON(len != 0);
+ }
+
+ WARN_ON(k != sglen);
+
+ txd->ddar = c->ddar;
+ txd->size = size;
+ txd->sglen = sglen;
+ txd->cyclic = 1;
+ txd->period = sgperiod;
+
+ return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
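
To make the segment splitting above concrete (illustrative numbers): with DMA_MAX_SIZE = 0x1fff and DMA_ALIGN = 3 the largest word-aligned chunk is 0x1ffc (8188) bytes, so a 16 KiB period needs sgperiod = DIV_ROUND_UP(16384, 8188) = 3 segments, which the inner loop sizes as 5460 + 5460 + 5464 bytes; a 64 KiB buffer (four such periods) then yields sglen = 65536 * 3 / 16384 = 12 sg entries, and txd->period = 3 records how many completed segments make up one period.
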
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
@@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
if (maxburst == 8)
ddar |= DDAR_BS;
- dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
- c, addr, width, maxburst);
+ dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
+ &c->vc, addr, width, maxburst);
c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
@@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
case DMA_TERMINATE_ALL:
- dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
/* Clear the tx descriptor lists */
- spin_lock_irqsave(&c->lock, flags);
- list_splice_tail_init(&c->desc_submitted, &head);
- list_splice_tail_init(&c->desc_issued, &head);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_get_all_descriptors(&c->vc, &head);
p = c->phy;
if (p) {
- struct sa11x0_dma_desc *txd, *txn;
-
dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
/* vchan is assigned to a pchan - stop the channel */
writel(DCSR_RUN | DCSR_IE |
@@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
DCSR_STRTB | DCSR_DONEB,
p->base + DMA_DCSR_C);
- list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
- if (txd->tx.chan == &c->chan)
- list_move(&txd->node, &head);
-
if (p->txd_load) {
if (p->txd_load != p->txd_done)
- list_add_tail(&p->txd_load->node, &head);
+ list_add_tail(&p->txd_load->vd.node, &head);
p->txd_load = NULL;
}
if (p->txd_done) {
- list_add_tail(&p->txd_done->node, &head);
+ list_add_tail(&p->txd_done->vd.node, &head);
p->txd_done = NULL;
}
c->phy = NULL;
@@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_unlock(&d->lock);
tasklet_schedule(&d->task);
}
- spin_unlock_irqrestore(&c->lock, flags);
- sa11x0_dma_desc_free(d, &head);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
ret = 0;
break;
case DMA_PAUSE:
- dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
- spin_lock_irqsave(&c->lock, flags);
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
if (c->status == DMA_IN_PROGRESS) {
c->status = DMA_PAUSED;
@@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_unlock(&d->lock);
}
}
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
ret = 0;
break;
case DMA_RESUME:
- dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
- spin_lock_irqsave(&c->lock, flags);
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
if (c->status == DMA_PAUSED) {
c->status = DMA_IN_PROGRESS;
p = c->phy;
if (p) {
writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
- } else if (!list_empty(&c->desc_issued)) {
+ } else if (!list_empty(&c->vc.desc_issued)) {
spin_lock(&d->lock);
list_add_tail(&c->node, &d->chan_pending);
spin_unlock(&d->lock);
}
}
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
ret = 0;
break;
@@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
return -ENOMEM;
}
- c->chan.device = dmadev;
c->status = DMA_IN_PROGRESS;
c->ddar = chan_desc[i].ddar;
c->name = chan_desc[i].name;
- spin_lock_init(&c->lock);
- INIT_LIST_HEAD(&c->desc_submitted);
- INIT_LIST_HEAD(&c->desc_issued);
INIT_LIST_HEAD(&c->node);
- list_add_tail(&c->chan.device_node, &dmadev->channels);
+
+ c->vc.desc_free = sa11x0_dma_free_desc;
+ vchan_init(&c->vc, dmadev);
}
return dma_async_device_register(dmadev);
@@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
struct sa11x0_dma_chan *c, *cn;
- list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
- list_del(&c->chan.device_node);
+ list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
kfree(c);
}
}
@@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
spin_lock_init(&d->lock);
INIT_LIST_HEAD(&d->chan_pending);
- INIT_LIST_HEAD(&d->desc_complete);
d->base = ioremap(res->start, resource_size(res));
if (!d->base) {
@@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
}
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
+ d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
if (ret) {
dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
new file mode 100644
index 00000000000..6f80432a3f0
--- /dev/null
+++ b/drivers/dma/virt-dma.c
@@ -0,0 +1,123 @@
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct virt_dma_desc, tx);
+}
+
+dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct virt_dma_desc *vd = to_virt_desc(tx);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
+ list_add_tail(&vd->node, &vc->desc_submitted);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
+ vc, vd, cookie);
+
+ return cookie;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_submit);
+
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
+ dma_cookie_t cookie)
+{
+ struct virt_dma_desc *vd;
+
+ list_for_each_entry(vd, &vc->desc_issued, node)
+ if (vd->tx.cookie == cookie)
+ return vd;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vchan_find_desc);
+
+/*
+ * This tasklet handles completed descriptors by calling their callbacks
+ * and freeing them; it also runs the periodic callback for cyclic transfers.
+ */
+static void vchan_complete(unsigned long arg)
+{
+ struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_desc *vd;
+ dma_async_tx_callback cb = NULL;
+ void *cb_data = NULL;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&vc->lock);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ vd = vc->cyclic;
+ if (vd) {
+ vc->cyclic = NULL;
+ cb = vd->tx.callback;
+ cb_data = vd->tx.callback_param;
+ }
+ spin_unlock_irq(&vc->lock);
+
+ if (cb)
+ cb(cb_data);
+
+ while (!list_empty(&head)) {
+ vd = list_first_entry(&head, struct virt_dma_desc, node);
+ cb = vd->tx.callback;
+ cb_data = vd->tx.callback_param;
+
+ list_del(&vd->node);
+
+ vc->desc_free(vd);
+
+ if (cb)
+ cb(cb_data);
+ }
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
+{
+ while (!list_empty(head)) {
+ struct virt_dma_desc *vd = list_first_entry(head,
+ struct virt_dma_desc, node);
+ list_del(&vd->node);
+ dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+ vc->desc_free(vd);
+ }
+}
+EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
+
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
+{
+ dma_cookie_init(&vc->chan);
+
+ spin_lock_init(&vc->lock);
+ INIT_LIST_HEAD(&vc->desc_submitted);
+ INIT_LIST_HEAD(&vc->desc_issued);
+ INIT_LIST_HEAD(&vc->desc_completed);
+
+ tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
+
+ vc->chan.device = dmadev;
+ list_add_tail(&vc->chan.device_node, &dmadev->channels);
+}
+EXPORT_SYMBOL_GPL(vchan_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
new file mode 100644
index 00000000000..85c19d63f9f
--- /dev/null
+++ b/drivers/dma/virt-dma.h
@@ -0,0 +1,152 @@
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef VIRT_DMA_H
+#define VIRT_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "dmaengine.h"
+
+struct virt_dma_desc {
+ struct dma_async_tx_descriptor tx;
+ /* protected by vc.lock */
+ struct list_head node;
+};
+
+struct virt_dma_chan {
+ struct dma_chan chan;
+ struct tasklet_struct task;
+ void (*desc_free)(struct virt_dma_desc *);
+
+ spinlock_t lock;
+
+ /* protected by vc.lock */
+ struct list_head desc_submitted;
+ struct list_head desc_issued;
+ struct list_head desc_completed;
+
+ struct virt_dma_desc *cyclic;
+};
+
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct virt_dma_chan, chan);
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * vc: virtual channel allocating this descriptor
+ * vd: virtual descriptor to prepare
+ * tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd, unsigned long tx_flags)
+{
+ extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+
+ dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+ vd->tx.flags = tx_flags;
+ vd->tx.tx_submit = vchan_tx_submit;
+
+ return &vd->tx;
+}
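
A usage sketch (driver names are illustrative, not from this patch): the driver embeds struct virt_dma_desc in its own descriptor, allocates it in its prep routine, and returns it through vchan_tx_prep(), which wires tx_submit to vchan_tx_submit() so submission lands on the desc_submitted list. <linux/slab.h> is assumed to be included for kzalloc().

struct foo_desc {
        struct virt_dma_desc vd;        /* embedded so container_of() can recover it */
        size_t len;
};

static struct dma_async_tx_descriptor *foo_prep(struct dma_chan *chan,
                dma_addr_t dest, size_t len, unsigned long flags)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

        if (!d)
                return NULL;
        d->len = len;

        /* Initialises d->vd.tx against vc and sets tx_submit = vchan_tx_submit */
        return vchan_tx_prep(vc, &d->vd, flags);
}
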
+
+/**
+ * vchan_issue_pending - move submitted descriptors to issued list
+ * vc: virtual channel to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
+{
+ list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
+ return !list_empty(&vc->desc_issued);
+}
+
+/**
+ * vchan_cookie_complete - report completion of a descriptor
+ * vd: virtual descriptor to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ dma_cookie_complete(&vd->tx);
+ dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
+ vd, vd->tx.cookie);
+ list_add_tail(&vd->node, &vc->desc_completed);
+
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_cyclic_callback - report the completion of a period
+ * vd: virtual descriptor
+ */
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ vc->cyclic = vd;
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_next_desc - peek at the next descriptor to be processed
+ * vc: virtual channel to obtain descriptor from
+ *
+ * vc.lock must be held by caller
+ */
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
+{
+ if (list_empty(&vc->desc_issued))
+ return NULL;
+
+ return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
+}
+
+/**
+ * vchan_get_all_descriptors - obtain all submitted, issued and completed descriptors
+ * vc: virtual channel to get descriptors from
+ * head: list of descriptors found
+ *
+ * vc.lock must be held by caller
+ *
+ * Removes all submitted, issued and completed descriptors from the internal
+ * lists, and collects them on the provided list head
+ */
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
+ struct list_head *head)
+{
+ list_splice_tail_init(&vc->desc_submitted, head);
+ list_splice_tail_init(&vc->desc_issued, head);
+ list_splice_tail_init(&vc->desc_completed, head);
+}
+
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
+{
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ vchan_get_all_descriptors(vc, &head);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+#endif
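
Taken together, the header implements a small descriptor state machine: vchan_tx_submit() queues onto desc_submitted, vchan_issue_pending() moves everything to desc_issued, vchan_cookie_complete() parks finished work on desc_completed for the tasklet, and the helpers above sweep all three lists at once. Below is a sketch of a DMA_TERMINATE_ALL handler built on them; the hardware stop and the in-flight descriptor tracking (fc->at) are driver-specific assumptions, not part of this patch.

struct foo_desc {
        struct virt_dma_desc vd;
        /* ... hardware programme ... */
};

struct foo_chan {
        struct virt_dma_chan vc;
        struct foo_desc *at;            /* descriptor currently on the hardware */
};

static void foo_stop_hw(struct foo_chan *fc);   /* placeholder: driver-specific */

/* Illustrative sketch of terminate-all on top of virt-dma. */
static int foo_terminate_all(struct foo_chan *fc)
{
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fc->vc.lock, flags);
        if (fc->at) {
                foo_stop_hw(fc);
                /* The in-flight descriptor sits on no list; reclaim it too. */
                list_add_tail(&fc->at->vd.node, &head);
                fc->at = NULL;
        }
        vchan_get_all_descriptors(&fc->vc, &head);
        spin_unlock_irqrestore(&fc->vc.lock, flags);

        vchan_dma_desc_free_list(&fc->vc, &head);       /* frees via vc.desc_free() */
        return 0;
}

This mirrors what the pl08x and sa11x0 conversions above do in their DMA_TERMINATE_ALL paths.
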
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 3e8dcf8d2e0..50e08f03aa6 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -17,10 +17,12 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
@@ -128,6 +130,10 @@ struct mmc_omap_host {
unsigned char id; /* 16xx chips have 2 MMC blocks */
struct clk * iclk;
struct clk * fclk;
+ struct dma_chan *dma_rx;
+ u32 dma_rx_burst;
+ struct dma_chan *dma_tx;
+ u32 dma_tx_burst;
struct resource *mem_res;
void __iomem *virt_base;
unsigned int phys_base;
@@ -153,12 +159,8 @@ struct mmc_omap_host {
unsigned use_dma:1;
unsigned brs_received:1, dma_done:1;
- unsigned dma_is_read:1;
unsigned dma_in_use:1;
- int dma_ch;
spinlock_t dma_lock;
- struct timer_list dma_timer;
- unsigned dma_len;
struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS];
struct mmc_omap_slot *current_slot;
@@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
int abort)
{
enum dma_data_direction dma_data_dir;
+ struct device *dev = mmc_dev(host->mmc);
+ struct dma_chan *c;
- BUG_ON(host->dma_ch < 0);
- if (data->error)
- omap_stop_dma(host->dma_ch);
- /* Release DMA channel lazily */
- mod_timer(&host->dma_timer, jiffies + HZ);
- if (data->flags & MMC_DATA_WRITE)
+ if (data->flags & MMC_DATA_WRITE) {
dma_data_dir = DMA_TO_DEVICE;
- else
+ c = host->dma_tx;
+ } else {
dma_data_dir = DMA_FROM_DEVICE;
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
- dma_data_dir);
+ c = host->dma_rx;
+ }
+ if (c) {
+ if (data->error) {
+ dmaengine_terminate_all(c);
+ /* Claim nothing transferred on error... */
+ data->bytes_xfered = 0;
+ }
+ dev = c->device->dev;
+ }
+ dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
}
static void mmc_omap_send_stop_work(struct work_struct *work)
@@ -525,16 +534,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
}
static void
-mmc_omap_dma_timer(unsigned long data)
-{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-
- BUG_ON(host->dma_ch < 0);
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
-}
-
-static void
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
{
unsigned long flags;
@@ -891,159 +890,15 @@ static void mmc_omap_cover_handler(unsigned long param)
jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
}
-/* Prepare to transfer the next segment of a scatterlist */
-static void
-mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
+static void mmc_omap_dma_callback(void *priv)
{
- int dma_ch = host->dma_ch;
- unsigned long data_addr;
- u16 buf, frame;
- u32 count;
- struct scatterlist *sg = &data->sg[host->sg_idx];
- int src_port = 0;
- int dst_port = 0;
- int sync_dev = 0;
-
- data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- frame = data->blksz;
- count = sg_dma_len(sg);
-
- if ((data->blocks == 1) && (count > data->blksz))
- count = frame;
-
- host->dma_len = count;
-
- /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
- * Use 16 or 32 word frames when the blocksize is at least that large.
- * Blocksize is usually 512 bytes; but not for some SD reads.
- */
- if (cpu_is_omap15xx() && frame > 32)
- frame = 32;
- else if (frame > 64)
- frame = 64;
- count /= frame;
- frame >>= 1;
-
- if (!(data->flags & MMC_DATA_WRITE)) {
- buf = 0x800f | ((frame - 1) << 8);
-
- if (cpu_class_is_omap1()) {
- src_port = OMAP_DMA_PORT_TIPB;
- dst_port = OMAP_DMA_PORT_EMIFF;
- }
- if (cpu_is_omap24xx())
- sync_dev = OMAP24XX_DMA_MMC1_RX;
-
- omap_set_dma_src_params(dma_ch, src_port,
- OMAP_DMA_AMODE_CONSTANT,
- data_addr, 0, 0);
- omap_set_dma_dest_params(dma_ch, dst_port,
- OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sg), 0, 0);
- omap_set_dma_dest_data_pack(dma_ch, 1);
- omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
- } else {
- buf = 0x0f80 | ((frame - 1) << 0);
-
- if (cpu_class_is_omap1()) {
- src_port = OMAP_DMA_PORT_EMIFF;
- dst_port = OMAP_DMA_PORT_TIPB;
- }
- if (cpu_is_omap24xx())
- sync_dev = OMAP24XX_DMA_MMC1_TX;
-
- omap_set_dma_dest_params(dma_ch, dst_port,
- OMAP_DMA_AMODE_CONSTANT,
- data_addr, 0, 0);
- omap_set_dma_src_params(dma_ch, src_port,
- OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sg), 0, 0);
- omap_set_dma_src_data_pack(dma_ch, 1);
- omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
- }
+ struct mmc_omap_host *host = priv;
+ struct mmc_data *data = host->data;
- /* Max limit for DMA frame count is 0xffff */
- BUG_ON(count > 0xffff);
+ /* If we got to the end of DMA, assume everything went well */
+ data->bytes_xfered += data->blocks * data->blksz;
- OMAP_MMC_WRITE(host, BUF, buf);
- omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
- frame, count, OMAP_DMA_SYNC_FRAME,
- sync_dev, 0);
-}
-
-/* A scatterlist segment completed */
-static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
-{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
- struct mmc_data *mmcdat = host->data;
-
- if (unlikely(host->dma_ch < 0)) {
- dev_err(mmc_dev(host->mmc),
- "DMA callback while DMA not enabled\n");
- return;
- }
- /* FIXME: We really should do something to _handle_ the errors */
- if (ch_status & OMAP1_DMA_TOUT_IRQ) {
- dev_err(mmc_dev(host->mmc),"DMA timeout\n");
- return;
- }
- if (ch_status & OMAP_DMA_DROP_IRQ) {
- dev_err(mmc_dev(host->mmc), "DMA sync error\n");
- return;
- }
- if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
- return;
- }
- mmcdat->bytes_xfered += host->dma_len;
- host->sg_idx++;
- if (host->sg_idx < host->sg_len) {
- mmc_omap_prepare_dma(host, host->data);
- omap_start_dma(host->dma_ch);
- } else
- mmc_omap_dma_done(host, host->data);
-}
-
-static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
-{
- const char *dma_dev_name;
- int sync_dev, dma_ch, is_read, r;
-
- is_read = !(data->flags & MMC_DATA_WRITE);
- del_timer_sync(&host->dma_timer);
- if (host->dma_ch >= 0) {
- if (is_read == host->dma_is_read)
- return 0;
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
- }
-
- if (is_read) {
- if (host->id == 0) {
- sync_dev = OMAP_DMA_MMC_RX;
- dma_dev_name = "MMC1 read";
- } else {
- sync_dev = OMAP_DMA_MMC2_RX;
- dma_dev_name = "MMC2 read";
- }
- } else {
- if (host->id == 0) {
- sync_dev = OMAP_DMA_MMC_TX;
- dma_dev_name = "MMC1 write";
- } else {
- sync_dev = OMAP_DMA_MMC2_TX;
- dma_dev_name = "MMC2 write";
- }
- }
- r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
- host, &dma_ch);
- if (r != 0) {
- dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
- return r;
- }
- host->dma_ch = dma_ch;
- host->dma_is_read = is_read;
-
- return 0;
+ mmc_omap_dma_done(host, data);
}
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
@@ -1118,33 +973,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
host->sg_idx = 0;
if (use_dma) {
- if (mmc_omap_get_dma_channel(host, data) == 0) {
- enum dma_data_direction dma_data_dir;
-
- if (data->flags & MMC_DATA_WRITE)
- dma_data_dir = DMA_TO_DEVICE;
- else
- dma_data_dir = DMA_FROM_DEVICE;
-
- host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- sg_len, dma_data_dir);
- host->total_bytes_left = 0;
- mmc_omap_prepare_dma(host, req->data);
- host->brs_received = 0;
- host->dma_done = 0;
- host->dma_in_use = 1;
- } else
- use_dma = 0;
+ enum dma_data_direction dma_data_dir;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *c;
+ u32 burst, *bp;
+ u16 buf;
+
+ /*
+ * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
+ * and 24xx. Use 16 or 32 word frames when the
+ * blocksize is at least that large. Blocksize is
+ * usually 512 bytes; but not for some SD reads.
+ */
+ burst = cpu_is_omap15xx() ? 32 : 64;
+ if (burst > data->blksz)
+ burst = data->blksz;
+
+ burst >>= 1;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ c = host->dma_tx;
+ bp = &host->dma_tx_burst;
+ buf = 0x0f80 | (burst - 1) << 0;
+ dma_data_dir = DMA_TO_DEVICE;
+ } else {
+ c = host->dma_rx;
+ bp = &host->dma_rx_burst;
+ buf = 0x800f | (burst - 1) << 8;
+ dma_data_dir = DMA_FROM_DEVICE;
+ }
+
+ if (!c)
+ goto use_pio;
+
+ /* Only reconfigure if we have a different burst size */
+ if (*bp != burst) {
+ struct dma_slave_config cfg;
+
+ cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+ cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.src_maxburst = burst;
+ cfg.dst_maxburst = burst;
+
+ if (dmaengine_slave_config(c, &cfg))
+ goto use_pio;
+
+ *bp = burst;
+ }
+
+ host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
+ dma_data_dir);
+ if (host->sg_len == 0)
+ goto use_pio;
+
+ tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
+ data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto use_pio;
+
+ OMAP_MMC_WRITE(host, BUF, buf);
+
+ tx->callback = mmc_omap_dma_callback;
+ tx->callback_param = host;
+ dmaengine_submit(tx);
+ host->brs_received = 0;
+ host->dma_done = 0;
+ host->dma_in_use = 1;
+ return;
}
+ use_pio:
/* Revert to PIO? */
- if (!use_dma) {
- OMAP_MMC_WRITE(host, BUF, 0x1f1f);
- host->total_bytes_left = data->blocks * block_size;
- host->sg_len = sg_len;
- mmc_omap_sg_to_buf(host);
- host->dma_in_use = 0;
- }
+ OMAP_MMC_WRITE(host, BUF, 0x1f1f);
+ host->total_bytes_left = data->blocks * block_size;
+ host->sg_len = sg_len;
+ mmc_omap_sg_to_buf(host);
+ host->dma_in_use = 0;
}
static void mmc_omap_start_request(struct mmc_omap_host *host,
@@ -1157,8 +1064,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
/* only touch fifo AFTER the controller readies it */
mmc_omap_prepare_data(host, req);
mmc_omap_start_command(host, req->cmd);
- if (host->dma_in_use)
- omap_start_dma(host->dma_ch);
+ if (host->dma_in_use) {
+ struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
+ host->dma_tx : host->dma_rx;
+
+ dma_async_issue_pending(c);
+ }
}
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
@@ -1400,6 +1311,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
struct resource *res;
+ dma_cap_mask_t mask;
+ unsigned sig;
int i, ret = 0;
int irq;
@@ -1439,7 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
spin_lock_init(&host->dma_lock);
- setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
spin_lock_init(&host->slot_lock);
init_waitqueue_head(&host->slot_wq);
@@ -1450,11 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
host->id = pdev->id;
host->mem_res = res;
host->irq = irq;
-
host->use_dma = 1;
- host->dev->dma_mask = &pdata->dma_mask;
- host->dma_ch = -1;
-
host->irq = irq;
host->phys_base = host->mem_res->start;
host->virt_base = ioremap(res->start, resource_size(res));
@@ -1474,9 +1382,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
goto err_free_iclk;
}
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_tx_burst = -1;
+ host->dma_rx_burst = -1;
+
+ if (cpu_is_omap24xx())
+ sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
+ else
+ sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
+ host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+ if (!host->dma_tx) {
+ dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
+ sig);
+ goto err_dma;
+ }
+#else
+ if (!host->dma_tx)
+ dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
+ sig);
+#endif
+ if (cpu_is_omap24xx())
+ sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
+ else
+ sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
+ host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+ if (!host->dma_rx) {
+ dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
+ sig);
+ goto err_dma;
+ }
+#else
+ if (!host->dma_rx)
+ dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
+ sig);
+#endif
+
ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
if (ret)
- goto err_free_fclk;
+ goto err_free_dma;
if (pdata->init != NULL) {
ret = pdata->init(&pdev->dev);
@@ -1510,7 +1457,11 @@ err_plat_cleanup:
pdata->cleanup(&pdev->dev);
err_free_irq:
free_irq(host->irq, host);
-err_free_fclk:
+err_free_dma:
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
clk_put(host->fclk);
err_free_iclk:
clk_disable(host->iclk);
@@ -1545,6 +1496,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev)
clk_disable(host->iclk);
clk_put(host->iclk);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
+
iounmap(host->virt_base);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
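The omap.c conversion above follows the standard dmaengine slave sequence: configure the channel for the peripheral FIFO, map the scatterlist against the DMA engine's device, prepare a slave_sg descriptor, attach the completion callback, submit, then issue_pending to start the hardware. A condensed sketch of that sequence follows; fifo_addr, my_callback and the fixed 16-bit width with a burst of 16 are illustrative placeholders, not values from this patch.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_start_rx(struct dma_chan *chan, dma_addr_t fifo_addr,
			    struct scatterlist *sg, unsigned int sg_len,
			    void (*my_callback)(void *), void *my_data)
{
	struct dma_slave_config cfg = {
		.src_addr = fifo_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst = 16,
	};
	struct dma_async_tx_descriptor *tx;
	int nents;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	nents = dma_map_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
	if (!nents)
		return -ENOMEM;

	tx = dmaengine_prep_slave_sg(chan, sg, nents, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_unmap_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
		return -EIO;
	}

	tx->callback = my_callback;
	tx->callback_param = my_data;
	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* kick the hardware */
	return 0;
}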
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index bc28627af66..3a09f93cc3b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
+#include <linux/dmaengine.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -29,6 +30,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
+#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
@@ -37,7 +39,6 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
-#include <plat/dma.h>
#include <mach/hardware.h>
#include <plat/board.h>
#include <plat/mmc.h>
@@ -166,7 +167,8 @@ struct omap_hsmmc_host {
int suspended;
int irq;
int use_dma, dma_ch;
- int dma_line_tx, dma_line_rx;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
int slot_id;
int response_busy;
int context_loss;
@@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
return DMA_FROM_DEVICE;
}
+static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
+ struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
int dma_ch;
@@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
spin_unlock_irqrestore(&host->irq_lock, flags);
if (host->use_dma && dma_ch != -1) {
- dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
- host->data->sg_len,
+ struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
+
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(chan->device->dev,
+ host->data->sg, host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
- omap_free_dma(dma_ch);
+
host->data->host_cookie = 0;
}
host->data = NULL;
@@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
- struct mmc_data *data)
-{
- int sync_dev;
-
- if (data->flags & MMC_DATA_WRITE)
- sync_dev = host->dma_line_tx;
- else
- sync_dev = host->dma_line_rx;
- return sync_dev;
-}
-
-static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
- struct mmc_data *data,
- struct scatterlist *sgl)
-{
- int blksz, nblk, dma_ch;
-
- dma_ch = host->dma_ch;
- if (data->flags & MMC_DATA_WRITE) {
- omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
- omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sgl), 0, 0);
- } else {
- omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
- omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sgl), 0, 0);
- }
-
- blksz = host->data->blksz;
- nblk = sg_dma_len(sgl) / blksz;
-
- omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
- blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
- omap_hsmmc_get_dma_sync_dev(host, data),
- !(data->flags & MMC_DATA_WRITE));
-
- omap_start_dma(dma_ch);
-}
-
-/*
- * DMA call back function
- */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
+static void omap_hsmmc_dma_callback(void *param)
{
- struct omap_hsmmc_host *host = cb_data;
+ struct omap_hsmmc_host *host = param;
+ struct dma_chan *chan;
struct mmc_data *data;
- int dma_ch, req_in_progress;
- unsigned long flags;
-
- if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
- dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
- ch_status);
- return;
- }
+ int req_in_progress;
- spin_lock_irqsave(&host->irq_lock, flags);
+ spin_lock_irq(&host->irq_lock);
if (host->dma_ch < 0) {
- spin_unlock_irqrestore(&host->irq_lock, flags);
+ spin_unlock_irq(&host->irq_lock);
return;
}
data = host->mrq->data;
- host->dma_sg_idx++;
- if (host->dma_sg_idx < host->dma_len) {
- /* Fire up the next transfer. */
- omap_hsmmc_config_dma_params(host, data,
- data->sg + host->dma_sg_idx);
- spin_unlock_irqrestore(&host->irq_lock, flags);
- return;
- }
-
+ chan = omap_hsmmc_get_dma_chan(host, data);
if (!data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ dma_unmap_sg(chan->device->dev,
+ data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
req_in_progress = host->req_in_progress;
- dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock_irqrestore(&host->irq_lock, flags);
-
- omap_free_dma(dma_ch);
+ spin_unlock_irq(&host->irq_lock);
/* If DMA has finished after TC, complete the request */
if (!req_in_progress) {
@@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_data *data,
- struct omap_hsmmc_next *next)
+ struct omap_hsmmc_next *next,
+ struct dma_chan *chan)
{
int dma_len;
@@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
/* Check if next job is already prepared */
if (next ||
(!next && data->host_cookie != host->next_data.cookie)) {
- dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
+ dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
} else {
@@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
- int dma_ch = 0, ret = 0, i;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor *tx;
+ int ret = 0, i;
struct mmc_data *data = req->data;
+ struct dma_chan *chan;
/* Sanity check: all the SG entries must be aligned by block size. */
for (i = 0; i < data->sg_len; i++) {
@@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
BUG_ON(host->dma_ch != -1);
- ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
- "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
- if (ret != 0) {
- dev_err(mmc_dev(host->mmc),
- "%s: omap_request_dma() failed with %d\n",
- mmc_hostname(host->mmc), ret);
+ chan = omap_hsmmc_get_dma_chan(host, data);
+
+ cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+ cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = data->blksz / 4;
+ cfg.dst_maxburst = data->blksz / 4;
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
return ret;
- }
- ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
+
+ ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
if (ret)
return ret;
- host->dma_ch = dma_ch;
- host->dma_sg_idx = 0;
+ tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx) {
+ dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+ /* FIXME: cleanup */
+ return -1;
+ }
+
+ tx->callback = omap_hsmmc_dma_callback;
+ tx->callback_param = host;
- omap_hsmmc_config_dma_params(host, data, data->sg);
+ /* Does not fail */
+ dmaengine_submit(tx);
+
+ host->dma_ch = 1;
+
+ dma_async_issue_pending(chan);
return 0;
}
@@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct omap_hsmmc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- if (host->use_dma) {
- if (data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (host->use_dma && data->host_cookie) {
+ struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
+
+ dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
@@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
return ;
}
- if (host->use_dma)
+ if (host->use_dma) {
+ struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
+
if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
- &host->next_data))
+ &host->next_data, c))
mrq->data->host_cookie = 0;
+ }
}
/*
@@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
const struct of_device_id *match;
+ dma_cap_mask_t mask;
+ unsigned tx_req, rx_req;
match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
if (match) {
@@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
host->pdata = pdata;
host->dev = &pdev->dev;
host->use_dma = 1;
- host->dev->dma_mask = &pdata->dma_mask;
host->dma_ch = -1;
host->irq = irq;
host->slot_id = 0;
@@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_irq;
}
- host->dma_line_tx = res->start;
+ tx_req = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
if (!res) {
@@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_irq;
}
- host->dma_line_rx = res->start;
+ rx_req = res->start;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+ if (!host->rx_chan) {
+ dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+ ret = -ENXIO;
+ goto err_irq;
+ }
+
+ host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+ if (!host->tx_chan) {
+ dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+ ret = -ENXIO;
+ goto err_irq;
+ }
/* Request IRQ for MMC operations */
ret = request_irq(host->irq, omap_hsmmc_irq, 0,
@@ -2021,6 +2014,10 @@ err_reg:
err_irq_cd_init:
free_irq(host->irq, host);
err_irq:
+ if (host->tx_chan)
+ dma_release_channel(host->tx_chan);
+ if (host->rx_chan)
+ dma_release_channel(host->rx_chan);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
@@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
if (mmc_slot(host).card_detect_irq)
free_irq(mmc_slot(host).card_detect_irq, host);
+ if (host->tx_chan)
+ dma_release_channel(host->tx_chan);
+ if (host->rx_chan)
+ dma_release_channel(host->rx_chan);
+
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
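Both MMC drivers now obtain their channels with dma_request_channel() and omap_dma_filter_fn, passing the request-line number taken from the platform's IORESOURCE_DMA resources, and release them on the error and remove paths. A rough sketch of that pattern, reusing the "rx"/"tx" resource names from omap_hsmmc above (error handling abbreviated):

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>

static int example_request_channels(struct platform_device *pdev,
				    struct dma_chan **rx, struct dma_chan **tx)
{
	struct resource *res;
	dma_cap_mask_t mask;
	unsigned sig;

	*rx = *tx = NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!res)
		return -ENXIO;
	sig = res->start;
	*rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!res)
		return -ENXIO;
	sig = res->start;
	*tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);

	if (!*rx || !*tx) {
		if (*rx)
			dma_release_channel(*rx);
		if (*tx)
			dma_release_channel(*tx);
		return -ENXIO;
	}
	return 0;
}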
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d0c9b..e9309b3659e 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -18,6 +19,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -123,7 +125,7 @@ struct omap_nand_info {
int gpmc_cs;
unsigned long phys_base;
struct completion comp;
- int dma_ch;
+ struct dma_chan *dma;
int gpmc_irq;
enum {
OMAP_NAND_IO_READ = 0, /* read */
@@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/*
- * omap_nand_dma_cb: callback on the completion of dma transfer
- * @lch: logical channel
- * @ch_satuts: channel status
+ * omap_nand_dma_callback: callback on the completion of dma transfer
* @data: pointer to completion data structure
*/
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_nand_dma_callback(void *data)
{
complete((struct completion *) data);
}
@@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
+ struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
- dma_addr_t dma_addr;
- int ret;
+ struct scatterlist sg;
unsigned long tim, limit;
-
- /* The fifo depth is 64 bytes max.
- * But configure the FIFO-threahold to 32 to get a sync at each frame
- * and frame length is 32 bytes.
- */
- int buf_len = len >> 6;
+ unsigned n;
+ int ret;
if (addr >= high_memory) {
struct page *p1;
@@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
}
- dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
- if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+ sg_init_one(&sg, addr, len);
+ n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+ if (n == 0) {
dev_err(&info->pdev->dev,
"Couldn't DMA map a %d byte buffer\n", len);
goto out_copy;
}
- if (is_write) {
- omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- info->phys_base, 0, 0);
- omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
- omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
- OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
- } else {
- omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- info->phys_base, 0, 0);
- omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
- omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
- OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
- }
- /* configure and start prefetch transfer */
+ tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+ is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto out_copy_unmap;
+
+ tx->callback = omap_nand_dma_callback;
+ tx->callback_param = &info->comp;
+ dmaengine_submit(tx);
+
+ /* configure and start prefetch transfer */
ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy_unmap;
init_completion(&info->comp);
-
- omap_start_dma(info->dma_ch);
+ dma_async_issue_pending(info->dma);
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
@@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
- dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
return 0;
out_copy_unmap:
- dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
struct omap_nand_platform_data *pdata;
int err;
int i, offset;
+ dma_cap_mask_t mask;
+ unsigned sig;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -1244,18 +1235,31 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
break;
case NAND_OMAP_PREFETCH_DMA:
- err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
- omap_nand_dma_cb, &info->comp, &info->dma_ch);
- if (err < 0) {
- info->dma_ch = -1;
- dev_err(&pdev->dev, "DMA request failed!\n");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = OMAP24XX_DMA_GPMC;
+ info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!info->dma) {
+ dev_err(&pdev->dev, "DMA engine request failed\n");
+ err = -ENXIO;
goto out_release_mem_region;
} else {
- omap_set_dma_dest_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
- omap_set_dma_src_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
-
+ struct dma_slave_config cfg;
+ int rc;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = info->phys_base;
+ cfg.dst_addr = info->phys_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 16;
+ cfg.dst_maxburst = 16;
+ rc = dmaengine_slave_config(info->dma, &cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+ rc);
+ goto out_release_mem_region;
+ }
info->nand.read_buf = omap_read_buf_dma_pref;
info->nand.write_buf = omap_write_buf_dma_pref;
}
@@ -1358,6 +1362,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
return 0;
out_release_mem_region:
+ if (info->dma)
+ dma_release_channel(info->dma);
release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
kfree(info);
@@ -1373,8 +1379,8 @@ static int omap_nand_remove(struct platform_device *pdev)
omap3_free_bch(&info->mtd);
platform_set_drvdata(pdev, NULL);
- if (info->dma_ch != -1)
- omap_free_dma(info->dma_ch);
+ if (info->dma)
+ dma_release_channel(info->dma);
if (info->gpmc_irq)
free_irq(info->gpmc_irq, info);
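For the NAND prefetch path the buffer is contiguous, so the conversion wraps it in a one-entry scatterlist and waits on a completion fired from the descriptor callback. A self-contained sketch of that one-shot pattern (placeholder names, GPMC prefetch setup omitted, and assuming buf is a lowmem kernel address as the driver checks above):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_dma_complete(void *data)
{
	complete(data);
}

static int example_xfer(struct dma_chan *chan, void *buf, size_t len,
			bool is_write)
{
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	struct dma_async_tx_descriptor *tx;
	struct completion done;
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, dir))
		return -ENOMEM;

	tx = dmaengine_prep_slave_sg(chan, &sg, 1,
			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_unmap_sg(chan->device->dev, &sg, 1, dir);
		return -EIO;
	}

	init_completion(&done);
	tx->callback = example_dma_complete;
	tx->callback_param = &done;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	wait_for_completion(&done);
	dma_unmap_sg(chan->device->dev, &sg, 1, dir);
	return 0;
}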
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7d46b15e152..bc4778175e3 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -28,6 +28,8 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -39,7 +41,6 @@
#include <linux/spi/spi.h>
-#include <plat/dma.h>
#include <plat/clock.h>
#include <plat/mcspi.h>
@@ -93,8 +94,8 @@
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
- int dma_tx_channel;
- int dma_rx_channel;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
int dma_tx_sync_dev;
int dma_rx_sync_dev;
@@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
return 0;
}
+static void omap2_mcspi_rx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ complete(&mcspi_dma->dma_rx_completion);
+
+ /* We must disable the DMA RX request */
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+}
+
+static void omap2_mcspi_tx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ complete(&mcspi_dma->dma_tx_completion);
+
+ /* We must disable the DMA TX request */
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+}
+
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi_dma *mcspi_dma;
- unsigned int count, c;
- unsigned long base, tx_reg, rx_reg;
- int word_len, data_type, element_count;
+ unsigned int count;
+ int word_len, element_count;
int elements = 0;
u32 l;
u8 * rx;
const u8 * tx;
void __iomem *chstat_reg;
+ struct dma_slave_config cfg;
+ enum dma_slave_buswidth width;
+ unsigned es;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ if (cs->word_len <= 8) {
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ es = 1;
+ } else if (cs->word_len <= 16) {
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ es = 2;
+ } else {
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ es = 4;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
+ cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
+ cfg.src_addr_width = width;
+ cfg.dst_addr_width = width;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ if (xfer->tx_buf && mcspi_dma->dma_tx) {
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = xfer->tx_dma;
+ sg_dma_len(&sg) = xfer->len;
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_tx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ }
+
+ if (xfer->rx_buf && mcspi_dma->dma_rx) {
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ size_t len = xfer->len - es;
+
+ dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+
+ if (l & OMAP2_MCSPI_CHCONF_TURBO)
+ len -= es;
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = xfer->rx_dma;
+ sg_dma_len(&sg) = len;
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_rx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ }
+
count = xfer->len;
- c = count;
word_len = cs->word_len;
- base = cs->phys;
- tx_reg = base + OMAP2_MCSPI_TX0;
- rx_reg = base + OMAP2_MCSPI_RX0;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
if (word_len <= 8) {
- data_type = OMAP_DMA_DATA_TYPE_S8;
element_count = count;
} else if (word_len <= 16) {
- data_type = OMAP_DMA_DATA_TYPE_S16;
element_count = count >> 1;
} else /* word_len <= 32 */ {
- data_type = OMAP_DMA_DATA_TYPE_S32;
element_count = count >> 2;
}
if (tx != NULL) {
- omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
- data_type, element_count, 1,
- OMAP_DMA_SYNC_ELEMENT,
- mcspi_dma->dma_tx_sync_dev, 0);
-
- omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
- OMAP_DMA_AMODE_CONSTANT,
- tx_reg, 0, 0);
-
- omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
- OMAP_DMA_AMODE_POST_INC,
- xfer->tx_dma, 0, 0);
- }
-
- if (rx != NULL) {
- elements = element_count - 1;
- if (l & OMAP2_MCSPI_CHCONF_TURBO)
- elements--;
-
- omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
- data_type, elements, 1,
- OMAP_DMA_SYNC_ELEMENT,
- mcspi_dma->dma_rx_sync_dev, 1);
-
- omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
- OMAP_DMA_AMODE_CONSTANT,
- rx_reg, 0, 0);
-
- omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
- OMAP_DMA_AMODE_POST_INC,
- xfer->rx_dma, 0, 0);
- }
-
- if (tx != NULL) {
- omap_start_dma(mcspi_dma->dma_tx_channel);
+ dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
}
if (rx != NULL) {
- omap_start_dma(mcspi_dma->dma_rx_channel);
+ dma_async_issue_pending(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 1);
}
@@ -408,7 +459,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
DMA_FROM_DEVICE);
omap2_mcspi_set_enable(spi, 0);
+ elements = element_count - 1;
+
if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+ elements--;
if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
& OMAP2_MCSPI_CHSTAT_RXS)) {
@@ -725,64 +779,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
return 0;
}
-static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
-{
- struct spi_device *spi = data;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
-
- mcspi = spi_master_get_devdata(spi->master);
- mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
-
- complete(&mcspi_dma->dma_rx_completion);
-
- /* We must disable the DMA RX request */
- omap2_mcspi_set_dma_req(spi, 1, 0);
-}
-
-static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
-{
- struct spi_device *spi = data;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
-
- mcspi = spi_master_get_devdata(spi->master);
- mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
-
- complete(&mcspi_dma->dma_tx_completion);
-
- /* We must disable the DMA TX request */
- omap2_mcspi_set_dma_req(spi, 0, 0);
-}
-
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
struct spi_master *master = spi->master;
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
+ dma_cap_mask_t mask;
+ unsigned sig;
mcspi = spi_master_get_devdata(master);
mcspi_dma = mcspi->dma_channels + spi->chip_select;
- if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
- omap2_mcspi_dma_rx_callback, spi,
- &mcspi_dma->dma_rx_channel)) {
- dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = mcspi_dma->dma_rx_sync_dev;
+ mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!mcspi_dma->dma_rx) {
+ dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
return -EAGAIN;
}
- if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
- omap2_mcspi_dma_tx_callback, spi,
- &mcspi_dma->dma_tx_channel)) {
- omap_free_dma(mcspi_dma->dma_rx_channel);
- mcspi_dma->dma_rx_channel = -1;
- dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
+ sig = mcspi_dma->dma_tx_sync_dev;
+ mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!mcspi_dma->dma_tx) {
+ dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
return -EAGAIN;
}
- init_completion(&mcspi_dma->dma_rx_completion);
- init_completion(&mcspi_dma->dma_tx_completion);
-
return 0;
}
@@ -814,8 +842,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
list_add_tail(&cs->node, &ctx->cs);
}
- if (mcspi_dma->dma_rx_channel == -1
- || mcspi_dma->dma_tx_channel == -1) {
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
ret = omap2_mcspi_request_dma(spi);
if (ret < 0)
return ret;
@@ -850,13 +877,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
if (spi->chip_select < spi->master->num_chipselect) {
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- if (mcspi_dma->dma_rx_channel != -1) {
- omap_free_dma(mcspi_dma->dma_rx_channel);
- mcspi_dma->dma_rx_channel = -1;
+ if (mcspi_dma->dma_rx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
}
- if (mcspi_dma->dma_tx_channel != -1) {
- omap_free_dma(mcspi_dma->dma_tx_channel);
- mcspi_dma->dma_tx_channel = -1;
+ if (mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
}
}
}
@@ -1176,7 +1203,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
break;
}
- mcspi->dma_channels[i].dma_rx_channel = -1;
mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
sprintf(dma_ch_name, "tx%d", i);
dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
@@ -1187,7 +1213,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
break;
}
- mcspi->dma_channels[i].dma_tx_channel = -1;
mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 02549017212..2a5f64a11b7 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -21,8 +21,9 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
-struct pl08x_lli;
struct pl08x_driver_data;
+struct pl08x_phy_chan;
+struct pl08x_txd;
/* Bitmasks for selecting AHB ports for DMA transfers */
enum {
@@ -46,170 +47,29 @@ enum {
* devices with static assignments
 * @muxval: a number usually used to poke into some mux register to
* mux in the signal to this channel
- * @cctl_opt: default options for the channel control register
+ * @cctl_memcpy: options for the channel control register for memcpy
+ * *** not used for slave channels ***
* @addr: source/target address in physical memory for this DMA channel,
* can be the address of a FIFO register for burst requests for example.
* This can be left undefined if the PrimeCell API is used for configuring
* this.
- * @circular_buffer: whether the buffer passed in is circular and
- * shall simply be looped round round (like a record baby round
- * round round round)
* @single: the device connected to this channel will request single DMA
* transfers, not bursts. (Bursts are default.)
* @periph_buses: the device connected to this channel is accessible via
* these buses (use PL08X_AHB1 | PL08X_AHB2).
*/
struct pl08x_channel_data {
- char *bus_id;
+ const char *bus_id;
int min_signal;
int max_signal;
u32 muxval;
- u32 cctl;
+ u32 cctl_memcpy;
dma_addr_t addr;
- bool circular_buffer;
bool single;
u8 periph_buses;
};
/**
- * Struct pl08x_bus_data - information of source or destination
- * busses for a transfer
- * @addr: current address
- * @maxwidth: the maximum width of a transfer on this bus
- * @buswidth: the width of this bus in bytes: 1, 2 or 4
- */
-struct pl08x_bus_data {
- dma_addr_t addr;
- u8 maxwidth;
- u8 buswidth;
-};
-
-/**
- * struct pl08x_phy_chan - holder for the physical channels
- * @id: physical index to this channel
- * @lock: a lock to use when altering an instance of this struct
- * @signal: the physical signal (aka channel) serving this physical channel
- * right now
- * @serving: the virtual channel currently being served by this physical
- * channel
- * @locked: channel unavailable for the system, e.g. dedicated to secure
- * world
- */
-struct pl08x_phy_chan {
- unsigned int id;
- void __iomem *base;
- spinlock_t lock;
- int signal;
- struct pl08x_dma_chan *serving;
- bool locked;
-};
-
-/**
- * struct pl08x_sg - structure containing data per sg
- * @src_addr: src address of sg
- * @dst_addr: dst address of sg
- * @len: transfer len in bytes
- * @node: node for txd's dsg_list
- */
-struct pl08x_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- size_t len;
- struct list_head node;
-};
-
-/**
- * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
- * @tx: async tx descriptor
- * @node: node for txd list for channels
- * @dsg_list: list of children sg's
- * @direction: direction of transfer
- * @llis_bus: DMA memory address (physical) start for the LLIs
- * @llis_va: virtual memory address start for the LLIs
- * @cctl: control reg values for current txd
- * @ccfg: config reg values for current txd
- */
-struct pl08x_txd {
- struct dma_async_tx_descriptor tx;
- struct list_head node;
- struct list_head dsg_list;
- enum dma_transfer_direction direction;
- dma_addr_t llis_bus;
- struct pl08x_lli *llis_va;
- /* Default cctl value for LLIs */
- u32 cctl;
- /*
- * Settings to be put into the physical channel when we
- * trigger this txd. Other registers are in llis_va[0].
- */
- u32 ccfg;
-};
-
-/**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
- * states
- * @PL08X_CHAN_IDLE: the channel is idle
- * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
- * channel and is running a transfer on it
- * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
- * channel, but the transfer is currently paused
- * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
- * channel to become available (only pertains to memcpy channels)
- */
-enum pl08x_dma_chan_state {
- PL08X_CHAN_IDLE,
- PL08X_CHAN_RUNNING,
- PL08X_CHAN_PAUSED,
- PL08X_CHAN_WAITING,
-};
-
-/**
- * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @chan: wrappped abstract channel
- * @phychan: the physical channel utilized by this channel, if there is one
- * @phychan_hold: if non-zero, hold on to the physical channel even if we
- * have no pending entries
- * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
- * @name: name of channel
- * @cd: channel platform data
- * @runtime_addr: address for RX/TX according to the runtime config
- * @runtime_direction: current direction of this channel according to
- * runtime config
- * @pend_list: queued transactions pending on this channel
- * @at: active transaction on this channel
- * @lock: a lock for this channel data
- * @host: a pointer to the host (internal use)
- * @state: whether the channel is idle, paused, running etc
- * @slave: whether this channel is a device (slave) or for memcpy
- * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
- * channels. Fill with 'true' if peripheral should be flow controller. Direction
- * will be selected at Runtime.
- * @waiting: a TX descriptor on this channel which is waiting for a physical
- * channel to become available
- */
-struct pl08x_dma_chan {
- struct dma_chan chan;
- struct pl08x_phy_chan *phychan;
- int phychan_hold;
- struct tasklet_struct tasklet;
- char *name;
- const struct pl08x_channel_data *cd;
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- u32 src_cctl;
- u32 dst_cctl;
- enum dma_transfer_direction runtime_direction;
- struct list_head pend_list;
- struct pl08x_txd *at;
- spinlock_t lock;
- struct pl08x_driver_data *host;
- enum pl08x_dma_chan_state state;
- bool slave;
- bool device_fc;
- struct pl08x_txd *waiting;
-};
-
-/**
* struct pl08x_platform_data - the platform configuration for the PL08x
* PrimeCells.
* @slave_channels: the channels defined for the different devices on the
@@ -229,8 +89,8 @@ struct pl08x_platform_data {
const struct pl08x_channel_data *slave_channels;
unsigned int num_slave_channels;
struct pl08x_channel_data memcpy_channel;
- int (*get_signal)(struct pl08x_dma_chan *);
- void (*put_signal)(struct pl08x_dma_chan *);
+ int (*get_signal)(const struct pl08x_channel_data *);
+ void (*put_signal)(const struct pl08x_channel_data *, int);
u8 lli_buses;
u8 mem_buses;
};
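With pl08x_dma_chan no longer exposed, the platform's get_signal()/put_signal() hooks now receive the channel data (and, for put, the signal that was handed out). A hypothetical platform implementation might look like the sketch below; the mux handling and the min_signal-based allocation are illustrative only and not taken from any real board file.

#include <linux/amba/pl08x.h>

static int my_plat_get_signal(const struct pl08x_channel_data *cd)
{
	/* e.g. poke cd->muxval into a board-specific mux register here */
	return cd->min_signal;	/* signal number now serving this channel */
}

static void my_plat_put_signal(const struct pl08x_channel_data *cd, int signal)
{
	/* undo the routing for "signal" when the channel is released */
}

static struct pl08x_platform_data my_pl08x_pdata = {
	/* .slave_channels, .memcpy_channel, .lli_buses, .mem_buses, ... */
	.get_signal = my_plat_get_signal,
	.put_signal = my_plat_put_signal,
};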
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
new file mode 100644
index 00000000000..eb475a8ea25
--- /dev/null
+++ b/include/linux/omap-dma.h
@@ -0,0 +1,22 @@
+/*
+ * OMAP DMA Engine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OMAP_DMA_H
+#define __LINUX_OMAP_DMA_H
+
+struct dma_chan;
+
+#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
+bool omap_dma_filter_fn(struct dma_chan *, void *);
+#else
+static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
+{
+ return false;
+}
+#endif
+
+#endif
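Because the header provides a stub omap_dma_filter_fn() that always returns false when CONFIG_DMA_OMAP is not enabled, client drivers can attempt the request unconditionally and fall back to PIO on a NULL channel. A minimal usage sketch; the request-line value is a made-up placeholder:

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

#define MY_DMA_REQ_LINE	61	/* placeholder request line, not a real assignment */

static struct dma_chan *example_get_chan(void)
{
	dma_cap_mask_t mask;
	unsigned sig = MY_DMA_REQ_LINE;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* a NULL return here simply means: use PIO */
	return dma_request_channel(mask, omap_dma_filter_fn, &sig);
}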