author	Heiko Carstens <hca@linux.ibm.com>	2021-03-22 16:36:27 +0100
committer	Heiko Carstens <hca@linux.ibm.com>	2021-04-12 12:46:43 +0200
commit	b23eb636d7f9f3d7c3ae0dd443cf26c4cc1e18f7 (patch)
tree	6cdb42880a4a6b8f3ba65cfae703cbe031523e8a /arch/s390
parent	ca897bb1814fc77ce2ded7b31350ff2b25ccb0a4 (diff)
s390/atomic: get rid of gcc atomic builtins
s390 is the only architecture in the kernel which makes use of gcc's
atomic builtin functions. Even though I don't see any technical problem
with that right now, remove this code and open-code compare-and-swap
loops again, like every other architecture does.

We can switch to a generic implementation as soon as other
architectures do the same.

See also https://lwn.net/Articles/586838/ for further details.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
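For illustration only, not part of this patch: with the builtins gone, callers
build atomic updates as compare-and-swap retry loops on top of primitives such
as __atomic_cmpxchg below. A minimal sketch, assuming kernel context
(READ_ONCE from <linux/compiler.h>); __atomic_add_sketch is a made-up name:

/* Hypothetical example, not in the patch: open-coded atomic add. */
static inline int __atomic_add_sketch(int delta, int *ptr)
{
	int old, new;

	do {
		old = READ_ONCE(*ptr);	/* snapshot the current value */
		new = old + delta;
		/* cs stored new iff the value it returns equals the snapshot */
	} while (__atomic_cmpxchg(ptr, old, new) != old);
	return new;
}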
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/atomic_ops.h	36
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 92ea9d9a2b93..2e818f2709bf 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -156,22 +156,46 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 static inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
-	return __sync_val_compare_and_swap(ptr, old, new);
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old;
 }
 
-static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 {
-	return __sync_bool_compare_and_swap(ptr, old, new);
+	int old_expected = old;
+
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old == old_expected;
 }
 
 static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
-	return __sync_val_compare_and_swap(ptr, old, new);
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+S" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old;
 }
 
-static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
 {
-	return __sync_bool_compare_and_swap(ptr, old, new);
+	long old_expected = old;
+
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+S" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old == old_expected;
 }
 
 #endif /* __ARCH_S390_ATOMIC_OPS__ */
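The bool variants serve the same retry pattern when the caller only needs
success/failure. Another hedged sketch, not part of the patch; the function
name is made up for this example:

/* Hypothetical example: add delta unless the counter is negative. */
static inline bool __atomic_add_unless_neg_sketch(int *ptr, int delta)
{
	int old;

	do {
		old = READ_ONCE(*ptr);
		if (old < 0)
			return false;	/* bail out without storing */
	} while (!__atomic_cmpxchg_bool(ptr, old, old + delta));
	return true;	/* cs swapped in old + delta atomically */
}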