author    Pavel Kozlov <pavel.kozlov@synopsys.com>  2023-08-15 19:11:36 +0400
committer Vineet Gupta <vgupta@kernel.org>          2023-08-15 17:27:26 -0700
commit 42f51fb24fd39cc547c086ab3d8a314cc603a91c (patch)
tree   296dcde00f8246e66920889547795d8a4a91db91 /arch/arc
parent 4d3696801bad2a037832c15a8d21dfe0c529d9cd (diff)
ARC: atomics: Add compiler barrier to atomic operations...
... to avoid unwanted gcc optimizations

SMP kernels fail to boot with commit 596ff4a09b89 ("cpumask:
re-introduce constant-sized cpumask optimizations"):

| percpu: BUG: failure at mm/percpu.c:2981/pcpu_build_alloc_info()!

The write operation performed by the SCOND instruction in the atomic
inline asm code is not properly communicated to the compiler, so the
compiler incorrectly optimizes a nested loop that runs through the
cpumask in the pcpu_build_alloc_info() function.

Fix this by adding a compiler barrier (a "memory" clobber in the
inline asm).

Apparently the atomic ops used to get a memory clobber implicitly via
the surrounding smp_mb(). However, commit b64be6836993c431e ("ARC:
atomics: implement relaxed variants") removed the smp_mb() for the
relaxed variants but failed to add the explicit compiler barrier.

Link: https://github.com/foss-for-synopsys-dwc-arc-processors/linux/issues/135
Cc: <stable@vger.kernel.org> # v6.3+
Fixes: b64be6836993c43 ("ARC: atomics: implement relaxed variants")
Signed-off-by: Pavel Kozlov <pavel.kozlov@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
[vgupta: tweaked the changelog and added Fixes tag]
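As background, the effect of a missing "memory" clobber can be shown
with an ordinary compiler barrier. The sketch below is illustrative and
not part of this patch; the variable and function names are made up,
though the kernel's barrier() macro really is an empty asm with a
"memory" clobber:

/*
 * A "memory" clobber tells GCC the asm may read or write memory, so
 * any memory value cached in a register must be discarded afterwards.
 * The kernel's barrier() is defined essentially like this.
 */
#define compiler_barrier()	__asm__ __volatile__("" ::: "memory")

static int flag;	/* stand-in for data another CPU updates via scond */

static void wait_for_flag(void)
{
	/*
	 * Without the barrier, GCC may hoist the load of 'flag' out of
	 * the loop and spin forever on a stale register copy; the
	 * clobber forces a fresh load on every iteration. The missing
	 * clobber in the ARC atomics caused the same class of
	 * miscompile in pcpu_build_alloc_info().
	 */
	while (!flag)
		compiler_barrier();
}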
Diffstat (limited to 'arch/arc')
-rw-r--r-- arch/arc/include/asm/atomic-llsc.h    | 6 +++---
-rw-r--r-- arch/arc/include/asm/atomic64-arcv2.h | 6 +++---

2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
index 1b0ffaeee16d..5258cb81a16b 100644
--- a/arch/arc/include/asm/atomic-llsc.h
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
: [val] "=&r" (val) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
\
return val; \
}
@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
\
return orig; \
}
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
index 6b6db981967a..9b5791b85471 100644
--- a/arch/arc/include/asm/atomic64-arcv2.h
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); \
+ : "cc", "memory"); \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: [val] "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); /* memory clobber comes from smp_mb() */ \
+ : "cc", "memory"); \
\
return val; \
}
@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(orig), "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); /* memory clobber comes from smp_mb() */ \
+ : "cc", "memory"); \
\
return orig; \
}
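For readability, here is a rough expansion of the patched ATOMIC_OP
macro for the add case, a sketch reconstructed from the hunks above
rather than verbatim kernel code (it assumes the kernel's
atomic_t { int counter; }):

static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock   %[val], [%[ctr]]	\n"	/* load-locked read of the counter */
	"	add     %[val], %[val], %[i]	\n"	/* apply the operation */
	"	scond   %[val], [%[ctr]]	\n"	/* store-conditional write back */
	"	bnz     1b			\n"	/* retry if another CPU intervened */
	: [val] "=&r" (val)		/* early clobber to prevent reg reuse */
	: [ctr] "r" (&v->counter),	/* not "m": llock needs reg direct addressing */
	  [i]   "ir" (i)
	: "cc", "memory");	/* "memory": tell GCC that scond writes v->counter */
}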