author     Linus Torvalds <torvalds@linux-foundation.org>    2013-02-26 10:45:25 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-02-26 10:45:25 -0800
commit     42a0a1b0fd343888c59afc8b243a77bcec2cc11c
tree       b136c088a244be8e0970767035c93d15127e8c83 /arch/arm/mm
parent     52caa59ed335616c5254adff7911465a57ed9f14
parent     d589829107c5528164a9b7dfe50d0001780865ed
Merge branch 'for-v3.9' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA-mapping updates from Marek Szyprowski:
 "This time all patches are related only to ARM DMA-mapping subsystem.
  The main extension provided by this pull request is highmem support.
  Besides that it contains a bunch of small bugfixes and cleanups."

* 'for-v3.9' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: DMA-mapping: fix memory leak in IOMMU dma-mapping implementation
  ARM: dma-mapping: Add maximum alignment order for dma iommu buffers
  ARM: dma-mapping: use himem for DMA buffers for IOMMU-mapped devices
  ARM: dma-mapping: add support for CMA regions placed in highmem zone
  arm: dma mapping: export arm iommu functions
  ARM: dma-mapping: Add arm_iommu_detach_device()
  ARM: dma-mapping: Add macro to_dma_iommu_mapping()
  ARM: dma-mapping: Set arm_dma_set_mask() for iommu->set_dma_mask()
  ARM: iommu: Include linux/kref.h in asm/dma-iommu.h
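The series exports the per-device IOMMU mapping API to modules. As context for the diff below, here is a minimal sketch of how a driver or bus glue might consume the exported arm_iommu_* symbols; the probe-style helper and the example IOVA window (0x80000000, SZ_128M, order 0) are illustrative assumptions, not code from this pull request:

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>
    #include <asm/dma-iommu.h>

    /* Hypothetical probe-time setup built on the newly exported symbols. */
    static int example_iommu_setup(struct device *dev)
    {
            struct dma_iommu_mapping *mapping;
            int ret;

            /* Reserve a 128 MiB IOVA window at 0x80000000 (example values). */
            mapping = arm_iommu_create_mapping(&platform_bus_type,
                                               0x80000000, SZ_128M, 0);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);

            /* Route the device's dma_map_ops through the IOMMU from now on. */
            ret = arm_iommu_attach_device(dev, mapping);
            if (ret < 0)
                    arm_iommu_release_mapping(mapping);
            return ret;
    }

After a successful attach, coherent and streaming DMA on this device go through the IOMMU ops and draw IOVA space from the window created above.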
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 108
1 file changed, 88 insertions(+), 20 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index dda3904dc64..c7e3759f16d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -186,13 +186,24 @@ static u64 get_coherent_dma_mask(struct device *dev)
 static void __dma_clear_buffer(struct page *page, size_t size)
 {
-        void *ptr;
         /*
          * Ensure that the allocated pages are zeroed, and that any data
          * lurking in the kernel direct-mapped region is invalidated.
          */
-        ptr = page_address(page);
-        if (ptr) {
+        if (PageHighMem(page)) {
+                phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+                phys_addr_t end = base + size;
+                while (size > 0) {
+                        void *ptr = kmap_atomic(page);
+                        memset(ptr, 0, PAGE_SIZE);
+                        dmac_flush_range(ptr, ptr + PAGE_SIZE);
+                        kunmap_atomic(ptr);
+                        page++;
+                        size -= PAGE_SIZE;
+                }
+                outer_flush_range(base, end);
+        } else {
+                void *ptr = page_address(page);
                 memset(ptr, 0, size);
                 dmac_flush_range(ptr, ptr + size);
                 outer_flush_range(__pa(ptr), __pa(ptr) + size);
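A highmem page has no permanent kernel mapping for page_address() to return, so the hunk above maps one page at a time with kmap_atomic() for the memset and the virtually-addressed L1 maintenance, then flushes the physically-addressed outer cache once for the whole range. The per-page pattern in isolation, with the cache maintenance elided (zero_pages_any_zone() is a hypothetical name):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Zero a physically contiguous, possibly-highmem buffer page by page. */
    static void zero_pages_any_zone(struct page *page, size_t size)
    {
            while (size > 0) {
                    void *ptr = kmap_atomic(page);  /* short-lived mapping */
                    memset(ptr, 0, PAGE_SIZE);
                    kunmap_atomic(ptr);             /* unmap before moving on */
                    page++;
                    size -= PAGE_SIZE;
            }
    }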
@@ -243,7 +254,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
 #endif

 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-                                     pgprot_t prot, struct page **ret_page);
+                                     pgprot_t prot, struct page **ret_page,
+                                     const void *caller);

 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                   pgprot_t prot, struct page **ret_page,
@@ -346,10 +358,11 @@ static int __init atomic_pool_init(void)
                 goto no_pages;

         if (IS_ENABLED(CONFIG_CMA))
-                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+                                              atomic_pool_init);
         else
                 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
-                                           &page, NULL);
+                                           &page, atomic_pool_init);

         if (ptr) {
                 int i;
@@ -542,27 +555,41 @@ static int __free_from_pool(void *start, size_t size)
 }

 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-                                     pgprot_t prot, struct page **ret_page)
+                                     pgprot_t prot, struct page **ret_page,
+                                     const void *caller)
 {
         unsigned long order = get_order(size);
         size_t count = size >> PAGE_SHIFT;
         struct page *page;
+        void *ptr;

         page = dma_alloc_from_contiguous(dev, count, order);
         if (!page)
                 return NULL;

         __dma_clear_buffer(page, size);
-        __dma_remap(page, size, prot);

+        if (PageHighMem(page)) {
+                ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+                if (!ptr) {
+                        dma_release_from_contiguous(dev, page, count);
+                        return NULL;
+                }
+        } else {
+                __dma_remap(page, size, prot);
+                ptr = page_address(page);
+        }
         *ret_page = page;
-        return page_address(page);
+        return ptr;
 }

 static void __free_from_contiguous(struct device *dev, struct page *page,
-                                   size_t size)
+                                   void *cpu_addr, size_t size)
 {
-        __dma_remap(page, size, pgprot_kernel);
+        if (PageHighMem(page))
+                __dma_free_remap(cpu_addr, size);
+        else
+                __dma_remap(page, size, pgprot_kernel);
         dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
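The highmem/lowmem split in __alloc_from_contiguous() comes down to how a CPU address is obtained: a lowmem page is already in the direct map, while a highmem page needs a mapping created for it, which is what __dma_alloc_remap() provides from the DMA remap area. A reduced sketch using the generic vmap() as a stand-in for __dma_alloc_remap() (note the real lowmem path also rewrites the direct mapping's attributes via __dma_remap(), which this sketch skips):

    #include <linux/highmem.h>
    #include <linux/vmalloc.h>

    /* Stand-in sketch: obtain a CPU-visible address for one allocated page. */
    static void *example_cpu_addr(struct page *page, pgprot_t prot)
    {
            if (PageHighMem(page))
                    return vmap(&page, 1, VM_MAP, prot); /* create a mapping */
            return page_address(page); /* direct map already covers lowmem */
    }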
@@ -583,9 +610,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
#define __free_from_pool(cpu_addr, size) 0
-#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
@@ -645,7 +672,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
         else if (!IS_ENABLED(CONFIG_CMA))
                 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
         else
-                addr = __alloc_from_contiguous(dev, size, prot, &page);
+                addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

         if (addr)
                 *handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -739,7 +766,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                  * Non-atomic allocations cannot be freed with IRQs disabled
                  */
                 WARN_ON(irqs_disabled());
-                __free_from_contiguous(dev, page, size);
+                __free_from_contiguous(dev, page, cpu_addr, size);
         }
 }
@@ -1002,6 +1029,9 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
         unsigned int count, start;
         unsigned long flags;

+        if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
+                order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
+
         count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
                  (1 << mapping->order) - 1) >> mapping->order;
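Without this cap, a buffer's IOVA is aligned to its own size rounded up to a power of two, so a single large allocation can waste a big slice of the address space on alignment alone. A worked example, assuming 4 KiB pages and CONFIG_ARM_DMA_IOMMU_ALIGNMENT=8 (the value 8 is an assumption here; the actual default comes from the 'maximum alignment order' patch in this series):

    size_t size = SZ_4M;                  /* requested buffer size */
    unsigned int order = get_order(size); /* 10: IOVA would be 4 MiB aligned */

    if (order > 8 /* CONFIG_ARM_DMA_IOMMU_ALIGNMENT */)
            order = 8;                    /* cap: 2^8 pages = 1 MiB alignment */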
@@ -1068,12 +1098,17 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
                 return pages;
         }

+        /*
+         * IOMMU can map any pages, so highmem can also be used here
+         */
+        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
         while (count) {
                 int j, order = __fls(count);

-                pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+                pages[i] = alloc_pages(gfp, order);
                 while (!pages[i] && order)
-                        pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+                        pages[i] = alloc_pages(gfp, --order);
                 if (!pages[i])
                         goto error;
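The loop above is the usual opportunistic high-order allocation: start from the largest order that still fits the remaining page count and back off one order at a time on failure, with __GFP_NOWARN suppressing failure splats for the attempts that are expected to fail. The fallback on its own (example_alloc_block() is a hypothetical name):

    #include <linux/bitops.h>
    #include <linux/gfp.h>

    /* Allocate the largest naturally-aligned block of at most 'count' pages. */
    static struct page *example_alloc_block(gfp_t gfp, size_t count,
                                            unsigned int *order_out)
    {
            unsigned int order = __fls(count);  /* largest 2^order <= count */
            struct page *page;

            page = alloc_pages(gfp | __GFP_NOWARN, order);
            while (!page && order)              /* shrink until something fits */
                    page = alloc_pages(gfp | __GFP_NOWARN, --order);

            if (page)
                    *order_out = order;
            return page;
    }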
@@ -1257,11 +1292,11 @@ err_mapping:
         return NULL;
 }

-static void __iommu_free_atomic(struct device *dev, struct page **pages,
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
                                 dma_addr_t handle, size_t size)
 {
         __iommu_remove_mapping(dev, handle, size);
-        __free_from_pool(page_address(pages[0]), size);
+        __free_from_pool(cpu_addr, size);
 }

 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
@@ -1344,7 +1379,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
         }

         if (__in_atomic_pool(cpu_addr, size)) {
-                __iommu_free_atomic(dev, pages, handle, size);
+                __iommu_free_atomic(dev, cpu_addr, handle, size);
                 return;
         }
@@ -1732,6 +1767,8 @@ struct dma_map_ops iommu_ops = {
         .unmap_sg            = arm_iommu_unmap_sg,
         .sync_sg_for_cpu     = arm_iommu_sync_sg_for_cpu,
         .sync_sg_for_device  = arm_iommu_sync_sg_for_device,
+
+        .set_dma_mask        = arm_dma_set_mask,
 };

 struct dma_map_ops iommu_coherent_ops = {
@@ -1745,6 +1782,8 @@ struct dma_map_ops iommu_coherent_ops = {
         .map_sg       = arm_coherent_iommu_map_sg,
         .unmap_sg     = arm_coherent_iommu_unmap_sg,
+
+        .set_dma_mask = arm_dma_set_mask,
 };
/**
@@ -1799,6 +1838,7 @@ err2:
 err:
         return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

 static void release_iommu_mapping(struct kref *kref)
 {
@@ -1815,6 +1855,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
         if (mapping)
                 kref_put(&mapping->kref, release_iommu_mapping);
 }
+EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

 /**
  * arm_iommu_attach_device
@@ -1843,5 +1884,32 @@ int arm_iommu_attach_device(struct device *dev,
         pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
         return 0;
 }
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+        struct dma_iommu_mapping *mapping;
+
+        mapping = to_dma_iommu_mapping(dev);
+        if (!mapping) {
+                dev_warn(dev, "Not attached\n");
+                return;
+        }
+
+        iommu_detach_device(mapping->domain, dev);
+        kref_put(&mapping->kref, release_iommu_mapping);
+        mapping = NULL;
+        set_dma_ops(dev, NULL);
+
+        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 #endif
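A sketch of the matching teardown for the setup example near the top, combining the new arm_iommu_detach_device() with the to_dma_iommu_mapping() helper also added by this series; invoking it from a driver remove() path is an assumption of the example:

    #include <linux/device.h>
    #include <asm/dma-iommu.h>

    /* Hypothetical remove-time teardown mirroring the earlier setup sketch. */
    static void example_iommu_teardown(struct device *dev)
    {
            struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

            if (!mapping)
                    return;

            arm_iommu_detach_device(dev);       /* drops attach ref, NULLs dma ops */
            arm_iommu_release_mapping(mapping); /* drops the ref from create */
    }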