author	Stefano Stabellini <stefano.stabellini@eu.citrix.com>	2013-12-09 15:55:11 +0000
committer	Stefano Stabellini <stefano.stabellini@eu.citrix.com>	2013-12-11 16:21:00 +0000
commit	02ab71cdae248533620abefa1d46097581457110 (patch)
tree	9304a15a0f04b931e66980f367760626117aa555 /arch/arm64
parent	b6497b383f65dd1bc60ea1f5d70e157a268fbd15 (diff)
xen/arm64: do not call the swiotlb functions twice
On arm64 the dma_map_ops implementation is based on the swiotlb.
swiotlb-xen, used by default in dom0 on Xen, is also based on the swiotlb.
Avoid calling into the default arm64 dma_map_ops functions from
xen_dma_map_page, xen_dma_unmap_page, xen_dma_sync_single_for_cpu and
xen_dma_sync_single_for_device, otherwise we end up calling into the
swiotlb twice.

When arm64 gets a non-swiotlb based implementation of dma_map_ops, we'll
probably have to reintroduce dma_map_ops calls in page-coherent.h.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
CC: catalin.marinas@arm.com
CC: Will.Deacon@arm.com
CC: Ian.Campbell@citrix.com
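The layering described above can be modeled outside the kernel. The following is a minimal, hypothetical userspace C sketch (names such as fake_swiotlb_bounce, fake_dma_ops, generic_ops and the _old/_new hooks are invented for illustration and are not kernel APIs) of why forwarding the xen_dma_* hooks to the swiotlb-backed generic arm64 ops results in two swiotlb passes per mapping, while the emptied hooks leave a single pass inside swiotlb-xen.

/* Toy model only; the real code lives in drivers/xen/swiotlb-xen.c and
 * arch/arm64/include/asm/xen/page-coherent.h. */
#include <stdio.h>

static int bounce_count;

/* Stands in for the common swiotlb bounce-buffer work. */
static void fake_swiotlb_bounce(const char *caller)
{
	bounce_count++;
	printf("swiotlb pass #%d (from %s)\n", bounce_count, caller);
}

/* Models the arm64 default dma_map_ops, which are swiotlb-backed. */
struct fake_dma_ops {
	void (*map_page)(void);
};

static void generic_map_page(void)
{
	fake_swiotlb_bounce("arm64 generic dma_map_ops");
}

static const struct fake_dma_ops generic_ops = { .map_page = generic_map_page };

/* Before the patch: the Xen arch hook forwarded to the generic ops. */
static void xen_dma_map_page_old(void)
{
	generic_ops.map_page();
}

/* After the patch: the hook is empty on arm64. */
static void xen_dma_map_page_new(void)
{
}

/* Models the swiotlb-xen map path, which already does its own swiotlb pass
 * before invoking the arch hook. */
static void fake_xen_swiotlb_map_page(void (*arch_hook)(void))
{
	fake_swiotlb_bounce("swiotlb-xen");
	arch_hook();
}

int main(void)
{
	bounce_count = 0;
	fake_xen_swiotlb_map_page(xen_dma_map_page_old);
	printf("old arch hook: %d swiotlb passes\n\n", bounce_count);

	bounce_count = 0;
	fake_xen_swiotlb_map_page(xen_dma_map_page_new);
	printf("new arch hook: %d swiotlb pass(es)\n", bounce_count);
	return 0;
}

Compiling and running this toy prints two swiotlb passes for the old hook and one for the new hook, mirroring the double dispatch the patch removes.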
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/xen/page-coherent.h	| 4 ----
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe..dde3fc9c49f0 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */