diff options
author | DongHun Kwak <dh0128.kwak@samsung.com> | 2017-09-13 11:05:34 +0900 |
---|---|---|
committer | DongHun Kwak <dh0128.kwak@samsung.com> | 2017-09-13 11:06:28 +0900 |
commit | 34bd32e225e2a8a94104489b31c42e5801cc1f4a (patch) | |
tree | d021b579a0c190354819974e1eaf0baa54b551f3 /boost/fiber/detail/spinlock_ttas_adaptive_futex.hpp | |
parent | f763a99a501650eff2c60288aa6f10ef916d769e (diff) | |
download | boost-34bd32e225e2a8a94104489b31c42e5801cc1f4a.tar.gz boost-34bd32e225e2a8a94104489b31c42e5801cc1f4a.tar.bz2 boost-34bd32e225e2a8a94104489b31c42e5801cc1f4a.zip |
Imported Upstream version 1.63.0 (tag: upstream/1.63.0)
Change-Id: Iac85556a04b7e58d63ba636dedb0986e3555714a
Signed-off-by: DongHun Kwak <dh0128.kwak@samsung.com>
Diffstat (limited to 'boost/fiber/detail/spinlock_ttas_adaptive_futex.hpp')
-rw-r--r-- | boost/fiber/detail/spinlock_ttas_adaptive_futex.hpp | 111 |
1 files changed, 111 insertions, 0 deletions
diff --git a/boost/fiber/detail/spinlock_ttas_adaptive_futex.hpp b/boost/fiber/detail/spinlock_ttas_adaptive_futex.hpp new file mode 100644 index 0000000000..fbd6a0e4d2 --- /dev/null +++ b/boost/fiber/detail/spinlock_ttas_adaptive_futex.hpp @@ -0,0 +1,111 @@ + +// Copyright Oliver Kowalke 2016. +// Distributed under the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#ifndef BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H +#define BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H + +#include <atomic> +#include <cmath> +#include <random> +#include <thread> + +#include <boost/fiber/detail/config.hpp> +#include <boost/fiber/detail/cpu_relax.hpp> +#include <boost/fiber/detail/futex.hpp> + +// based on informations from: +// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops +// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors + +namespace boost { +namespace fibers { +namespace detail { + +class spinlock_ttas_adaptive_futex { +private: + // align shared variable 'value_' at cache line to prevent false sharing + alignas(cache_alignment) std::atomic< std::int32_t > value_{ 0 }; + std::atomic< std::int32_t > tests_{ 0 }; + // padding to avoid other data one the cacheline of shared variable 'value_' + char pad_[cacheline_length]; + +public: + spinlock_ttas_adaptive_futex() noexcept = default; + + spinlock_ttas_adaptive_futex( spinlock_ttas_adaptive_futex const&) = delete; + spinlock_ttas_adaptive_futex & operator=( spinlock_ttas_adaptive_futex const&) = delete; + + void lock() noexcept { + std::int32_t collisions = 0, tests = 0, expected = 0; + const std::int32_t prev_tests = tests_.load( std::memory_order_relaxed); + const std::int32_t max_tests = (std::min)( static_cast< std::int32_t >( BOOST_FIBERS_SPIN_MAX_TESTS), 2 * prev_tests + 10); + // after max. 
spins or collisions suspend via futex + while ( max_tests > tests && BOOST_FIBERS_SPIN_MAX_COLLISIONS > collisions) { + // avoid using multiple pause instructions for a delay of a specific cycle count + // the delay of cpu_relax() (pause on Intel) depends on the processor family + // the cycle count can not guaranteed from one system to the next + // -> check the shared variable 'value_' in between each cpu_relax() to prevent + // unnecessarily long delays on some systems + // test shared variable 'status_' + // first access to 'value_' -> chache miss + // sucessive acccess to 'value_' -> cache hit + // if 'value_' was released by other fiber + // cached 'value_' is invalidated -> cache miss + if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) { + ++tests; +#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE) + // give CPU a hint that this thread is in a "spin-wait" loop + // delays the next instruction's execution for a finite period of time (depends on processor family) + // the CPU is not under demand, parts of the pipeline are no longer being used + // -> reduces the power consumed by the CPU + cpu_relax(); +#else + // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice, + // but only to another thread on the same processor + // instead of constant checking, a thread only checks if no other useful work is pending + std::this_thread::yield(); +#endif + } else if ( ! 
value_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_release) ) { + // spinlock now contended + // utilize 'Binary Exponential Backoff' algorithm + // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG) + static thread_local std::minstd_rand generator; + const std::int32_t z = std::uniform_int_distribution< std::int32_t >{ + 0, static_cast< std::int32_t >( 1) << collisions }( generator); + ++collisions; + for ( std::int32_t i = 0; i < z; ++i) { + cpu_relax(); + } + } else { + // success, lock acquired + tests_.store( prev_tests + (tests - prev_tests) / 8, std::memory_order_relaxed); + return; + } + } + // failure, lock not acquired + // pause via futex + if ( 2 != expected) { + expected = value_.exchange( 2, std::memory_order_acquire); + } + while ( 0 != expected) { + futex_wait( & value_, 2); + expected = value_.exchange( 2, std::memory_order_acquire); + } + // success, lock acquired + tests_.store( prev_tests + (tests - prev_tests) / 8, std::memory_order_relaxed); + } + + void unlock() noexcept { + if ( 1 != value_.fetch_sub( 1, std::memory_order_acquire) ) { + value_.store( 0, std::memory_order_release); + futex_wake( & value_); + } + } +}; + +}}} + +#endif // BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H |