| author | Adam Oleksy <adam.oleksy@nokia.com> | 2016-09-13 08:40:58 +0200 |
|---|---|---|
| committer | Tom Rini <trini@konsulko.com> | 2016-10-06 20:57:42 -0400 |
| commit | 59a51a10557a714b76c694a9fd1a6196a18d96a0 | |
| tree | aafc6c2400ca1e84c4c09c7967461b4fa1b3e6c1 | |
| parent | 9c00d982f1d18fd1e8ed586dc1c041988aba4567 | |
ARM64: Add support for some of the atomic64 operations
These functions are needed by UBI/UBIFS on the ZynqMP platform (ARM64).
Signed-off-by: Adam Oleksy <adam.oleksy@nokia.com>
Cc: Albert Aribaud <albert.u.boot@aribaud.net>
-rw-r--r-- | arch/arm/include/asm/atomic.h | 68
1 file changed, 67 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9b79506b59..171f4d9792 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -21,6 +21,11 @@
 #endif

 typedef struct { volatile int counter; } atomic_t;
+#if BITS_PER_LONG == 32
+typedef struct { volatile long long counter; } atomic64_t;
+#else /* BIT_PER_LONG == 32 */
+typedef struct { volatile long counter; } atomic64_t;
+#endif

 #define ATOMIC_INIT(i)	{ (i) }

@@ -28,7 +33,9 @@ typedef struct { volatile int counter; } atomic_t;
 #include <asm/proc-armv/system.h>

 #define atomic_read(v)	((v)->counter)
-#define atomic_set(v,i)	(((v)->counter) = (i))
+#define atomic_set(v, i)	(((v)->counter) = (i))
+#define atomic64_read(v)	atomic_read(v)
+#define atomic64_set(v, i)	atomic_set(v, i)

 static inline void atomic_add(int i, volatile atomic_t *v)
 {
@@ -101,6 +108,65 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	local_irq_restore(flags);
 }

+#if BITS_PER_LONG == 32
+
+static inline void atomic64_add(long long i, volatile atomic64_t *v)
+{
+	unsigned long flags = 0;
+
+	local_irq_save(flags);
+	v->counter += i;
+	local_irq_restore(flags);
+}
+
+static inline void atomic64_sub(long long i, volatile atomic64_t *v)
+{
+	unsigned long flags = 0;
+
+	local_irq_save(flags);
+	v->counter -= i;
+	local_irq_restore(flags);
+}
+
+#else /* BIT_PER_LONG == 32 */
+
+static inline void atomic64_add(long i, volatile atomic64_t *v)
+{
+	unsigned long flags = 0;
+
+	local_irq_save(flags);
+	v->counter += i;
+	local_irq_restore(flags);
+}
+
+static inline void atomic64_sub(long i, volatile atomic64_t *v)
+{
+	unsigned long flags = 0;
+
+	local_irq_save(flags);
+	v->counter -= i;
+	local_irq_restore(flags);
+}
+#endif
+
+static inline void atomic64_inc(volatile atomic64_t *v)
+{
+	unsigned long flags = 0;
+
+	local_irq_save(flags);
+	v->counter += 1;
+	local_irq_restore(flags);
+}
+
+static inline void atomic64_dec(volatile atomic64_t *v)
+{
+	unsigned long flags = 0;
+
+	local_irq_save(flags);
+	v->counter -= 1;
+	local_irq_restore(flags);
+}
+
 /* Atomic operations are already serializing on ARM */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
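For context, the snippet below is a minimal usage sketch of the atomic64 helpers this patch introduces (atomic64_set/read/add/sub/inc/dec). The counter name and the accounting function are hypothetical illustrations and not part of the patch; it also assumes a U-Boot build environment where <asm/atomic.h> resolves to this header. As with the existing atomic_* helpers in the same file, each operation is a plain C read-modify-write bracketed by local_irq_save()/local_irq_restore(), so it is serialized against interrupt handlers on the local CPU.

```c
#include <asm/atomic.h>

/* Hypothetical 64-bit byte counter, used only for illustration;
 * only the atomic64_* calls below come from this patch. */
static atomic64_t bytes_written;

static void account_io(long long len)
{
	atomic64_add(len, &bytes_written);	/* 64-bit add with IRQs masked */
	atomic64_inc(&bytes_written);		/* count the request itself */

	if (atomic64_read(&bytes_written) < 0)		/* plain read of the counter */
		atomic64_set(&bytes_written, 0);	/* illustrative reset */
}
```

Note that atomic64_read() and atomic64_set() simply reuse the existing atomic_read()/atomic_set() macros on the new atomic64_t type, so on 32-bit builds a concurrent reader can still observe a torn 64-bit value; the add/sub/inc/dec helpers, by contrast, mask interrupts for the whole read-modify-write.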