author		Will Deacon <will.deacon@arm.com>	2018-03-27 12:04:51 +0100
committer	Will Deacon <will.deacon@arm.com>	2018-03-27 12:04:51 +0100
commit		3f251cf0abec2afb6eca67f71380670dd55bdebe (patch)
tree		e2ebead8eb4ed13267839d8710749154d77545d7 /arch/arm64
parent		12eb369125abe92bfc55e9ce198200f5807b63ff (diff)
Revert "arm64: Revert L1_CACHE_SHIFT back to 6 (64-byte cache line size)"
This reverts commit 1f85b42a691cd8329ba82dbcaeec80ac1231b32a.

The internal dma-direct.h API has changed in -next, which collides with
us trying to use it to manage non-coherent DMA devices on systems with
unreasonably large cache writeback granules.

This isn't at all trivial to resolve, so revert our changes for now and
we can revisit this after the merge window. Effectively, this just
restores our behaviour back to that of 4.16.

Signed-off-by: Will Deacon <will.deacon@arm.com>
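For context, the net effect of the revert on the compile-time cache constants is small but significant: L1_CACHE_SHIFT goes back from 6 to 7, so L1_CACHE_BYTES and ARCH_DMA_MINALIGN both return to 128 bytes, again covering a Cache Writeback Granule of up to 128 bytes (CWG = 5). A minimal sketch of the arithmetic (stand-alone C, not kernel code):

	/* Sketch only: checks the constants restored by this revert.
	 * CTR_EL0.CWG encodes the writeback granule as 4 << CWG bytes;
	 * CWG = 5 gives 128, matching the restored ARCH_DMA_MINALIGN. */
	#include <stdio.h>

	int main(void)
	{
		printf("L1_CACHE_BYTES with SHIFT=6: %d\n", 1 << 6); /* 64, reverted */
		printf("L1_CACHE_BYTES with SHIFT=7: %d\n", 1 << 7); /* 128, restored */
		printf("granule for CWG=5:          %d\n", 4 << 5);  /* 128 */
		return 0;
	}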
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                    1
-rw-r--r--  arch/arm64/include/asm/cache.h        6
-rw-r--r--  arch/arm64/include/asm/dma-direct.h  43
-rw-r--r--  arch/arm64/kernel/cpufeature.c        9
-rw-r--r--  arch/arm64/mm/dma-mapping.c          17
-rw-r--r--  arch/arm64/mm/init.c                  3
6 files changed, 11 insertions(+), 68 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd74c5830232..d4d53c87267e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -17,7 +17,6 @@ config ARM64
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
-	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5df5cfe1c143..9bbffc7a301f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -33,7 +33,7 @@
 #define ICACHE_POLICY_VIPT	2
 #define ICACHE_POLICY_PIPT	3

-#define L1_CACHE_SHIFT		(6)
+#define L1_CACHE_SHIFT		7
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
/*
@@ -43,7 +43,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_DMA_MINALIGN	(128)
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

 #ifndef __ASSEMBLY__
@@ -77,7 +77,7 @@ static inline u32 cache_type_cwg(void)
 static inline int cache_line_size(void)
 {
 	u32 cwg = cache_type_cwg();
-	return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
+	return cwg ? 4 << cwg : L1_CACHE_BYTES;
 }

 #endif	/* __ASSEMBLY__ */
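The cache_line_size() change above is the runtime side of the same story: when CTR_EL0 reports a Cache Writeback Granule the line size is 4 << CWG bytes, and when it reports zero the code now falls back to L1_CACHE_BYTES rather than ARCH_DMA_MINALIGN (the two are equal again after this revert). A hedged stand-alone model of that decode (decode_cwg() is an illustrative name, not a kernel helper):

	/* Illustrative model of cache_line_size() above; decode_cwg() is
	 * a made-up name, and the fallback mirrors the post-revert value. */
	#include <stdio.h>

	#define L1_CACHE_BYTES 128	/* 1 << 7 after this revert */

	static int decode_cwg(unsigned int cwg)
	{
		/* CTR_EL0.CWG == 0 means the granule is not reported;
		 * otherwise the granule is 4 << CWG bytes. */
		return cwg ? 4 << cwg : L1_CACHE_BYTES;
	}

	int main(void)
	{
		printf("CWG=0 (unreported): %d bytes\n", decode_cwg(0)); /* 128 */
		printf("CWG=4: %d bytes\n", decode_cwg(4));              /* 64  */
		printf("CWG=5: %d bytes\n", decode_cwg(5));              /* 128 */
		return 0;
	}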
diff --git a/arch/arm64/include/asm/dma-direct.h b/arch/arm64/include/asm/dma-direct.h
deleted file mode 100644
index abb1b40ec751..000000000000
--- a/arch/arm64/include/asm/dma-direct.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_DMA_DIRECT_H
-#define __ASM_DMA_DIRECT_H
-
-#include <linux/jump_label.h>
-#include <linux/swiotlb.h>
-
-#include <asm/cache.h>
-
-DECLARE_STATIC_KEY_FALSE(swiotlb_noncoherent_bounce);
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	dma_addr_t dev_addr = (dma_addr_t)paddr;
-
-	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
-	phys_addr_t paddr = (phys_addr_t)dev_addr;
-
-	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	if (!dev->dma_mask)
-		return false;
-
-	/*
-	 * Force swiotlb buffer bouncing when ARCH_DMA_MINALIGN < CWG. The
-	 * swiotlb bounce buffers are aligned to (1 << IO_TLB_SHIFT).
-	 */
-	if (static_branch_unlikely(&swiotlb_noncoherent_bounce) &&
-	    !is_device_dma_coherent(dev) &&
-	    !is_swiotlb_buffer(dma_to_phys(dev, addr)))
-		return false;
-
-	return addr + size - 1 <= *dev->dma_mask;
-}
-
-#endif /* __ASM_DMA_DIRECT_H */
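The deleted helpers above are mostly plain offset arithmetic: dma_pfn_offset is a page-granular bus offset, and dma_to_phys() is the exact inverse of phys_to_dma(). A self-contained sketch of that round-trip property (all types and names here are toy stand-ins for the kernel's, not the kernel API):

	/* Toy model of the phys<->dma offset arithmetic being deleted. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	typedef uint64_t dma_addr_t;
	typedef uint64_t phys_addr_t;

	struct toy_device { unsigned long dma_pfn_offset; };

	static dma_addr_t toy_phys_to_dma(struct toy_device *dev, phys_addr_t paddr)
	{
		return paddr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
	}

	static phys_addr_t toy_dma_to_phys(struct toy_device *dev, dma_addr_t daddr)
	{
		return daddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
	}

	int main(void)
	{
		struct toy_device dev = { .dma_pfn_offset = 0x80000 }; /* 2 GiB in 4 KiB pages */
		phys_addr_t paddr = 0x80001000ULL;
		dma_addr_t daddr = toy_phys_to_dma(&dev, paddr);

		assert(toy_dma_to_phys(&dev, daddr) == paddr); /* exact inverse */
		printf("phys 0x%llx <-> dma 0x%llx\n",
		       (unsigned long long)paddr, (unsigned long long)daddr);
		return 0;
	}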
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 071a4548a231..96b15d7b10a8 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1624,6 +1624,7 @@ static void __init setup_system_capabilities(void)
 void __init setup_cpu_features(void)
 {
 	u32 cwg;
+	int cls;

 	setup_system_capabilities();
 	mark_const_caps_ready();
@@ -1644,9 +1645,13 @@ void __init setup_cpu_features(void)
 	 * Check for sane CTR_EL0.CWG value.
 	 */
 	cwg = cache_type_cwg();
+	cls = cache_line_size();
 	if (!cwg)
-		pr_warn("No Cache Writeback Granule information, assuming %d\n",
-			ARCH_DMA_MINALIGN);
+		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
+			cls);
+	if (L1_CACHE_BYTES < cls)
+		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
+			L1_CACHE_BYTES, cls);
 }

 static bool __maybe_unused
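The reworked check above warns in two cases: CWG unreported, and a hardware granule larger than the compile-time line size. With the restored L1_CACHE_SHIFT of 7 the second warning cannot fire for any CWG up to 5; the CWG = 6 case below is purely hypothetical, included to show when the warning would trigger. A quick stand-alone model:

	/* Toy model of the sanity check above, not kernel code. */
	#include <stdio.h>

	#define L1_CACHE_SHIFT 7		/* restored by this revert */
	#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

	int main(void)
	{
		for (unsigned int cwg = 4; cwg <= 6; cwg++) {
			int cls = 4 << cwg;	/* CWG-encoded line size */

			if (L1_CACHE_BYTES < cls)
				printf("CWG=%u: would warn (%d < %d)\n",
				       cwg, L1_CACHE_BYTES, cls);
			else
				printf("CWG=%u: ok, line size %d\n", cwg, cls);
		}
		return 0;
	}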
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 1e9dac8684ca..a96ec0181818 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -33,7 +33,6 @@
 #include <asm/cacheflush.h>

 static int swiotlb __ro_after_init;
-DEFINE_STATIC_KEY_FALSE(swiotlb_noncoherent_bounce);

 static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
 				 bool coherent)
@@ -505,14 +504,6 @@ static int __init arm64_dma_init(void)
 	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
 		swiotlb = 1;

-	if (WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
-		       TAINT_CPU_OUT_OF_SPEC,
-		       "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
-		       ARCH_DMA_MINALIGN, cache_line_size())) {
-		swiotlb = 1;
-		static_branch_enable(&swiotlb_noncoherent_bounce);
-	}
-
 	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
@@ -891,14 +882,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
-	/*
-	 * Enable swiotlb for buffer bouncing if ARCH_DMA_MINALIGN < CWG.
-	 * dma_capable() forces the actual bounce if the device is
-	 * non-coherent.
-	 */
-	if (static_branch_unlikely(&swiotlb_noncoherent_bounce) && !coherent)
-		iommu = NULL;
-
 	if (!dev->dma_ops)
 		dev->dma_ops = &arm64_swiotlb_dma_ops;
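The block removed above was the wiring that forced swiotlb bouncing for non-coherent devices whenever ARCH_DMA_MINALIGN was smaller than the hardware granule; with ARCH_DMA_MINALIGN back at 128 bytes that situation no longer arises, so the hook goes away. A simplified stand-alone model of the policy being deleted (names illustrative, not the kernel's):

	/* Toy model, not kernel code: bounce only when the device is
	 * non-coherent and the compile-time alignment is too small. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool must_bounce(bool coherent, int dma_minalign, int cwg_bytes)
	{
		return !coherent && dma_minalign < cwg_bytes;
	}

	int main(void)
	{
		printf("non-coherent, 64 vs 128:  %d\n", must_bounce(false, 64, 128));  /* 1 */
		printf("non-coherent, 128 vs 128: %d\n", must_bounce(false, 128, 128)); /* 0 */
		return 0;
	}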
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 664acf177799..9f3c47acf8ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -586,8 +586,7 @@ static void __init free_unused_memmap(void)
 void __init mem_init(void)
 {
 	if (swiotlb_force == SWIOTLB_FORCE ||
-	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT) ||
-	    ARCH_DMA_MINALIGN < cache_line_size())
+	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
 		swiotlb_init(1);
 	else
 		swiotlb_force = SWIOTLB_NO_FORCE;