-rw-r--r--   include/linux/preempt.h      | 22
-rw-r--r--   include/linux/spinlock_up.h  | 29
2 files changed, 32 insertions, 19 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5a710b9c578..87a03c746f1 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -93,14 +93,20 @@ do { \
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
-#define preempt_disable() do { } while (0)
-#define sched_preempt_enable_no_resched() do { } while (0)
-#define preempt_enable_no_resched() do { } while (0)
-#define preempt_enable() do { } while (0)
-
-#define preempt_disable_notrace() do { } while (0)
-#define preempt_enable_no_resched_notrace() do { } while (0)
-#define preempt_enable_notrace() do { } while (0)
+/*
+ * Even if we don't have any preemption, we need preempt disable/enable
+ * to be barriers, so that we don't have things like get_user/put_user
+ * that can cause faults and scheduling migrate into our preempt-protected
+ * region.
+ */
+#define preempt_disable() barrier()
+#define sched_preempt_enable_no_resched() barrier()
+#define preempt_enable_no_resched() barrier()
+#define preempt_enable() barrier()
+
+#define preempt_disable_notrace() barrier()
+#define preempt_enable_no_resched_notrace() barrier()
+#define preempt_enable_notrace() barrier()
 
 #endif /* CONFIG_PREEMPT_COUNT */
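To see what switching the stubs to barrier() buys, here is a minimal
user-space sketch (hypothetical names, not kernel code; barrier() is
spelled out the way the kernel's <linux/compiler.h> defines it for GCC).
With the old empty stubs the compiler sees straight through the
"critical section" and may cache, merge, or move the access to shared;
with the memory-clobber asm it must keep the access between the two
calls:

/*
 * barrier_sketch.c - minimal sketch, hypothetical names.
 */
#define barrier() __asm__ __volatile__("" : : : "memory")

/* The stubs the patch removes: no-ops the optimizer sees through. */
#define preempt_disable_old() do { } while (0)
#define preempt_enable_old()  do { } while (0)

/* The stubs the patch adds: compiler barriers. */
#define preempt_disable_new() barrier()
#define preempt_enable_new()  barrier()

int shared;

int old_way(int v)
{
        preempt_disable_old();
        shared += v;            /* free to be reordered or merged across
                                   the empty "critical section" */
        preempt_enable_old();
        return shared;
}

int new_way(int v)
{
        preempt_disable_new();
        shared += v;            /* pinned between the two "memory"
                                   clobbers: the load and store must
                                   happen here, in this order */
        preempt_enable_new();
        return shared;
}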
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index a26e2fb604e..e2369c167db 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -16,7 +16,10 @@
  * In the debug case, 1 means unlocked, 0 means locked. (the values
  * are inverted, to catch initialization bugs)
  *
- * No atomicity anywhere, we are on UP.
+ * No atomicity anywhere, we are on UP. However, we still need
+ * the compiler barriers, because we do not want the compiler to
+ * move potentially faulting instructions (notably user accesses)
+ * into the locked sequence, resulting in non-atomic execution.
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -25,6 +28,7 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
+	barrier();
 }
 
 static inline void
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
+	barrier();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	char oldval = lock->slock;
 
 	lock->slock = 0;
+	barrier();
 
 	return oldval > 0;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	barrier();
 	lock->slock = 1;
 }
 
 /*
  * Read-write spinlocks. No debug version.
  */
-#define arch_read_lock(lock) do { (void)(lock); } while (0)
-#define arch_write_lock(lock) do { (void)(lock); } while (0)
-#define arch_read_trylock(lock) ({ (void)(lock); 1; })
-#define arch_write_trylock(lock) ({ (void)(lock); 1; })
-#define arch_read_unlock(lock) do { (void)(lock); } while (0)
-#define arch_write_unlock(lock) do { (void)(lock); } while (0)
+#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock) ((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
-# define arch_spin_lock(lock) do { (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
-# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
-# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
+# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
 #define arch_spin_is_contended(lock) (((void)(lock), 0))
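The spinlock side follows the same pattern: barrier() goes after the
store that takes the lock and before the store that releases it, so
accesses cannot leak out of the locked region in either direction. A
minimal sketch under the same UP assumptions, single CPU and no real
atomicity needed (hypothetical toy_* names, not the kernel's API):

/*
 * up_lock_sketch.c - minimal sketch, hypothetical names.
 */
#define barrier() __asm__ __volatile__("" : : : "memory")

typedef struct {
        volatile unsigned int slock;    /* 1 = unlocked, 0 = locked */
} toy_spinlock_t;

static inline void toy_spin_lock(toy_spinlock_t *lock)
{
        lock->slock = 0;
        barrier();      /* nothing below may be hoisted above the lock */
}

static inline void toy_spin_unlock(toy_spinlock_t *lock)
{
        barrier();      /* nothing above may sink below the unlock */
        lock->slock = 1;
}

/* Usage: a potentially faulting access stays inside the locked region. */
static int protected_read(toy_spinlock_t *lock, const int *p)
{
        int val;

        toy_spin_lock(lock);
        val = *p;       /* the barriers keep this between lock and unlock */
        toy_spin_unlock(lock);
        return val;
}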