author    DongHun Kwak <dh0128.kwak@samsung.com>  2019-12-05 15:11:01 +0900
committer DongHun Kwak <dh0128.kwak@samsung.com>  2019-12-05 15:11:01 +0900
commit    3fdc3e5ee96dca5b11d1694975a65200787eab86 (patch)
tree      5c1733853892b8397d67706fa453a9bd978d2102 /boost/asio/detail/impl
parent    88e602c57797660ebe0f9e15dbd64c1ff16dead3 (diff)
Imported Upstream version 1.66.0 (tag: upstream/1.66.0)
Diffstat (limited to 'boost/asio/detail/impl')
-rw-r--r-- boost/asio/detail/impl/buffer_sequence_adapter.ipp | 10
-rw-r--r-- boost/asio/detail/impl/descriptor_ops.ipp | 23
-rw-r--r-- boost/asio/detail/impl/dev_poll_reactor.hpp | 19
-rw-r--r-- boost/asio/detail/impl/dev_poll_reactor.ipp | 45
-rw-r--r-- boost/asio/detail/impl/epoll_reactor.hpp | 19
-rw-r--r-- boost/asio/detail/impl/epoll_reactor.ipp | 164
-rw-r--r-- boost/asio/detail/impl/handler_tracking.ipp | 105
-rw-r--r-- boost/asio/detail/impl/kqueue_reactor.hpp | 23
-rw-r--r-- boost/asio/detail/impl/kqueue_reactor.ipp | 88
-rw-r--r-- boost/asio/detail/impl/null_event.ipp | 76
-rw-r--r-- boost/asio/detail/impl/posix_event.ipp | 12
-rw-r--r-- boost/asio/detail/impl/posix_thread.ipp | 10
-rw-r--r-- boost/asio/detail/impl/reactive_descriptor_service.ipp | 19
-rw-r--r-- boost/asio/detail/impl/reactive_serial_port_service.ipp | 11
-rw-r--r-- boost/asio/detail/impl/reactive_socket_service_base.ipp | 37
-rw-r--r-- boost/asio/detail/impl/resolver_service_base.ipp | 82
-rw-r--r-- boost/asio/detail/impl/scheduler.ipp (renamed from boost/asio/detail/impl/task_io_service.ipp) | 235
-rw-r--r-- boost/asio/detail/impl/select_reactor.hpp | 19
-rw-r--r-- boost/asio/detail/impl/select_reactor.ipp | 65
-rw-r--r-- boost/asio/detail/impl/service_registry.hpp | 64
-rw-r--r-- boost/asio/detail/impl/service_registry.ipp | 67
-rw-r--r-- boost/asio/detail/impl/signal_set_service.ipp | 56
-rw-r--r-- boost/asio/detail/impl/socket_ops.ipp | 153
-rw-r--r-- boost/asio/detail/impl/strand_executor_service.hpp | 181
-rw-r--r-- boost/asio/detail/impl/strand_executor_service.ipp | 126
-rw-r--r-- boost/asio/detail/impl/strand_service.hpp | 24
-rw-r--r-- boost/asio/detail/impl/strand_service.ipp | 27
-rw-r--r-- boost/asio/detail/impl/task_io_service.hpp | 80
-rw-r--r-- boost/asio/detail/impl/timer_queue_ptime.ipp | 15
-rw-r--r-- boost/asio/detail/impl/win_event.ipp | 5
-rw-r--r-- boost/asio/detail/impl/win_iocp_handle_service.ipp | 44
-rw-r--r-- boost/asio/detail/impl/win_iocp_io_context.hpp (renamed from boost/asio/detail/impl/win_iocp_io_service.hpp) | 71
-rw-r--r-- boost/asio/detail/impl/win_iocp_io_context.ipp (renamed from boost/asio/detail/impl/win_iocp_io_service.ipp) | 112
-rw-r--r-- boost/asio/detail/impl/win_iocp_serial_port_service.ipp | 7
-rw-r--r-- boost/asio/detail/impl/win_iocp_socket_service_base.ipp | 90
-rw-r--r-- boost/asio/detail/impl/win_object_handle_service.ipp | 36
-rw-r--r-- boost/asio/detail/impl/win_thread.ipp | 7
-rw-r--r-- boost/asio/detail/impl/win_tss_ptr.ipp | 4
-rw-r--r-- boost/asio/detail/impl/winrt_ssocket_service_base.ipp | 55
-rw-r--r-- boost/asio/detail/impl/winrt_timer_scheduler.hpp | 19
-rw-r--r-- boost/asio/detail/impl/winrt_timer_scheduler.ipp | 16
41 files changed, 1652 insertions(+), 669 deletions(-)
diff --git a/boost/asio/detail/impl/buffer_sequence_adapter.ipp b/boost/asio/detail/impl/buffer_sequence_adapter.ipp
index f471863e1a..c2114ba1cf 100644
--- a/boost/asio/detail/impl/buffer_sequence_adapter.ipp
+++ b/boost/asio/detail/impl/buffer_sequence_adapter.ipp
@@ -40,16 +40,16 @@ class winrt_buffer_impl :
public:
explicit winrt_buffer_impl(const boost::asio::const_buffer& b)
{
- bytes_ = const_cast<byte*>(boost::asio::buffer_cast<const byte*>(b));
- length_ = boost::asio::buffer_size(b);
- capacity_ = boost::asio::buffer_size(b);
+ bytes_ = const_cast<byte*>(static_cast<const byte*>(b.data()));
+ length_ = b.size();
+ capacity_ = b.size();
}
explicit winrt_buffer_impl(const boost::asio::mutable_buffer& b)
{
- bytes_ = const_cast<byte*>(boost::asio::buffer_cast<const byte*>(b));
+ bytes_ = static_cast<byte*>(b.data());
length_ = 0;
- capacity_ = boost::asio::buffer_size(b);
+ capacity_ = b.size();
}
~winrt_buffer_impl()
diff --git a/boost/asio/detail/impl/descriptor_ops.ipp b/boost/asio/detail/impl/descriptor_ops.ipp
index 00f6b4796e..cdf5022306 100644
--- a/boost/asio/detail/impl/descriptor_ops.ipp
+++ b/boost/asio/detail/impl/descriptor_ops.ipp
@@ -439,6 +439,29 @@ int poll_write(int d, state_type state, boost::system::error_code& ec)
return result;
}
+int poll_error(int d, state_type state, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return -1;
+ }
+
+ pollfd fds;
+ fds.fd = d;
+ fds.events = POLLPRI | POLLERR | POLLHUP;
+ fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : -1;
+ errno = 0;
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
} // namespace descriptor_ops
} // namespace detail
} // namespace asio
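
The new poll_error() waits only for exceptional conditions (POLLPRI | POLLERR | POLLHUP), using a zero timeout when the descriptor is in user-set non-blocking mode. A minimal standalone sketch of the same pattern, assuming plain POSIX and simplified error handling:

#include <poll.h>
#include <cstdio>

// Poll a single descriptor for error/priority conditions without blocking
// (timeout of 0), as poll_error() does for non-blocking descriptors.
int check_descriptor_error(int fd)
{
  pollfd fds;
  fds.fd = fd;
  fds.events = POLLPRI | POLLERR | POLLHUP;
  fds.revents = 0;
  int result = ::poll(&fds, 1, 0);
  if (result > 0)
    std::printf("descriptor %d has a pending error/priority event\n", fd);
  return result; // 0: nothing pending, -1: poll itself failed
}
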
diff --git a/boost/asio/detail/impl/dev_poll_reactor.hpp b/boost/asio/detail/impl/dev_poll_reactor.hpp
index 623346edd5..c01450ffea 100644
--- a/boost/asio/detail/impl/dev_poll_reactor.hpp
+++ b/boost/asio/detail/impl/dev_poll_reactor.hpp
@@ -46,12 +46,12 @@ void dev_poll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
if (shutdown_)
{
- io_service_.post_immediate_completion(op, false);
+ scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
- io_service_.work_started();
+ scheduler_.work_started();
if (earliest)
interrupter_.interrupt();
}
@@ -65,10 +65,23 @@ std::size_t dev_poll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
return n;
}
+template <typename Time_Traits>
+void dev_poll_reactor::move_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& target,
+ typename timer_queue<Time_Traits>::per_timer_data& source)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ queue.cancel_timer(target, ops);
+ queue.move_timer(target, source);
+ lock.unlock();
+ scheduler_.post_deferred_completions(ops);
+}
+
} // namespace detail
} // namespace asio
} // namespace boost
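
The new move_timer() is what lets per-timer state be transferred when a timer object is moved: it cancels anything queued on the target, then adopts the source's per_timer_data. A usage sketch, assuming this version's movable timer objects:

#include <boost/asio.hpp>
#include <chrono>
#include <utility>

int main()
{
  boost::asio::io_context ioc;
  boost::asio::steady_timer t1(ioc, std::chrono::seconds(5));

  // Moving a timer transfers its pending expiry; under the hood the
  // reactor's move_timer() cancels operations queued on the target and
  // adopts the source's per_timer_data.
  boost::asio::steady_timer t2(std::move(t1));
  (void)t2;
}
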
diff --git a/boost/asio/detail/impl/dev_poll_reactor.ipp b/boost/asio/detail/impl/dev_poll_reactor.ipp
index 7efb05ed02..5186d30473 100644
--- a/boost/asio/detail/impl/dev_poll_reactor.ipp
+++ b/boost/asio/detail/impl/dev_poll_reactor.ipp
@@ -30,9 +30,9 @@ namespace boost {
namespace asio {
namespace detail {
-dev_poll_reactor::dev_poll_reactor(boost::asio::io_service& io_service)
- : boost::asio::detail::service_base<dev_poll_reactor>(io_service),
- io_service_(use_service<io_service_impl>(io_service)),
+dev_poll_reactor::dev_poll_reactor(boost::asio::execution_context& ctx)
+ : boost::asio::detail::execution_context_service_base<dev_poll_reactor>(ctx),
+ scheduler_(use_service<scheduler>(ctx)),
mutex_(),
dev_poll_fd_(do_dev_poll_create()),
interrupter_(),
@@ -48,11 +48,11 @@ dev_poll_reactor::dev_poll_reactor(boost::asio::io_service& io_service)
dev_poll_reactor::~dev_poll_reactor()
{
- shutdown_service();
+ shutdown();
::close(dev_poll_fd_);
}
-void dev_poll_reactor::shutdown_service()
+void dev_poll_reactor::shutdown()
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
@@ -65,12 +65,13 @@ void dev_poll_reactor::shutdown_service()
timer_queues_.get_all_timers(ops);
- io_service_.abandon_operations(ops);
+ scheduler_.abandon_operations(ops);
}
-void dev_poll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+void dev_poll_reactor::notify_fork(
+ boost::asio::execution_context::fork_event fork_ev)
{
- if (fork_ev == boost::asio::io_service::fork_child)
+ if (fork_ev == boost::asio::execution_context::fork_child)
{
detail::mutex::scoped_lock lock(mutex_);
@@ -113,7 +114,7 @@ void dev_poll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
void dev_poll_reactor::init_task()
{
- io_service_.init_task();
+ scheduler_.init_task();
}
int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&)
@@ -168,7 +169,7 @@ void dev_poll_reactor::start_op(int op_type, socket_type descriptor,
if (op->perform())
{
lock.unlock();
- io_service_.post_immediate_completion(op, is_continuation);
+ scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
@@ -176,7 +177,7 @@ void dev_poll_reactor::start_op(int op_type, socket_type descriptor,
}
bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
- io_service_.work_started();
+ scheduler_.work_started();
if (first)
{
::pollfd& ev = add_pending_event_change(descriptor);
@@ -240,13 +241,13 @@ void dev_poll_reactor::cleanup_descriptor_data(
{
}
-void dev_poll_reactor::run(bool block, op_queue<operation>& ops)
+void dev_poll_reactor::run(long usec, op_queue<operation>& ops)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
// We can return immediately if there's no work to do and the reactor is
// not supposed to block.
- if (!block && op_queue_[read_op].empty() && op_queue_[write_op].empty()
+ if (usec == 0 && op_queue_[read_op].empty() && op_queue_[write_op].empty()
&& op_queue_[except_op].empty() && timer_queues_.all_empty())
return;
@@ -272,7 +273,15 @@ void dev_poll_reactor::run(bool block, op_queue<operation>& ops)
pending_event_change_index_.clear();
}
- int timeout = block ? get_timeout() : 0;
+ // Calculate timeout.
+ int timeout;
+ if (usec == 0)
+ timeout = 0;
+ else
+ {
+ timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);
+ timeout = get_timeout(timeout);
+ }
lock.unlock();
// Block on the /dev/poll descriptor.
@@ -386,11 +395,13 @@ void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue)
timer_queues_.erase(&queue);
}
-int dev_poll_reactor::get_timeout()
+int dev_poll_reactor::get_timeout(int msec)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
- return timer_queues_.wait_duration_msec(5 * 60 * 1000);
+ const int max_msec = 5 * 60 * 1000;
+ return timer_queues_.wait_duration_msec(
+ (msec < 0 || max_msec < msec) ? max_msec : msec);
}
void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor,
@@ -401,7 +412,7 @@ void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor,
for (int i = 0; i < max_ops; ++i)
need_interrupt = op_queue_[i].cancel_operations(
descriptor, ops, ec) || need_interrupt;
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
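
run() now takes a timeout in microseconds rather than a bool, and the expression ((usec - 1) / 1000 + 1) rounds a positive microsecond count up to whole milliseconds for the poll call. A small self-contained check of that conversion (hypothetical helper name):

#include <cassert>

// Mirrors the conversion in run() above: 0 means poll without blocking,
// negative means block indefinitely, positive rounds up to whole ms so
// the wait never ends before the requested deadline.
int usec_to_msec_ceil(long usec)
{
  if (usec == 0) return 0;
  if (usec < 0) return -1;
  return static_cast<int>((usec - 1) / 1000 + 1);
}

int main()
{
  assert(usec_to_msec_ceil(1) == 1);     // rounds up, not down to 0
  assert(usec_to_msec_ceil(1000) == 1);  // exact millisecond
  assert(usec_to_msec_ceil(1001) == 2);  // rounds up
}
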
diff --git a/boost/asio/detail/impl/epoll_reactor.hpp b/boost/asio/detail/impl/epoll_reactor.hpp
index ea5c6bfc69..537b0fc7da 100644
--- a/boost/asio/detail/impl/epoll_reactor.hpp
+++ b/boost/asio/detail/impl/epoll_reactor.hpp
@@ -44,12 +44,12 @@ void epoll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
if (shutdown_)
{
- io_service_.post_immediate_completion(op, false);
+ scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
- io_service_.work_started();
+ scheduler_.work_started();
if (earliest)
update_timeout();
}
@@ -63,10 +63,23 @@ std::size_t epoll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
return n;
}
+template <typename Time_Traits>
+void epoll_reactor::move_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& target,
+ typename timer_queue<Time_Traits>::per_timer_data& source)
+{
+ mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ queue.cancel_timer(target, ops);
+ queue.move_timer(target, source);
+ lock.unlock();
+ scheduler_.post_deferred_completions(ops);
+}
+
} // namespace detail
} // namespace asio
} // namespace boost
diff --git a/boost/asio/detail/impl/epoll_reactor.ipp b/boost/asio/detail/impl/epoll_reactor.ipp
index 3d3d244e0a..a9c2602fbe 100644
--- a/boost/asio/detail/impl/epoll_reactor.ipp
+++ b/boost/asio/detail/impl/epoll_reactor.ipp
@@ -35,14 +35,16 @@ namespace boost {
namespace asio {
namespace detail {
-epoll_reactor::epoll_reactor(boost::asio::io_service& io_service)
- : boost::asio::detail::service_base<epoll_reactor>(io_service),
- io_service_(use_service<io_service_impl>(io_service)),
- mutex_(),
+epoll_reactor::epoll_reactor(boost::asio::execution_context& ctx)
+ : execution_context_service_base<epoll_reactor>(ctx),
+ scheduler_(use_service<scheduler>(ctx)),
+ mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
+ REACTOR_REGISTRATION, scheduler_.concurrency_hint())),
interrupter_(),
epoll_fd_(do_epoll_create()),
timer_fd_(do_timerfd_create()),
- shutdown_(false)
+ shutdown_(false),
+ registered_descriptors_mutex_(mutex_.enabled())
{
// Add the interrupter's descriptor to epoll.
epoll_event ev = { 0, { 0 } };
@@ -68,7 +70,7 @@ epoll_reactor::~epoll_reactor()
close(timer_fd_);
}
-void epoll_reactor::shutdown_service()
+void epoll_reactor::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
@@ -86,12 +88,13 @@ void epoll_reactor::shutdown_service()
timer_queues_.get_all_timers(ops);
- io_service_.abandon_operations(ops);
+ scheduler_.abandon_operations(ops);
}
-void epoll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+void epoll_reactor::notify_fork(
+ boost::asio::execution_context::fork_event fork_ev)
{
- if (fork_ev == boost::asio::io_service::fork_child)
+ if (fork_ev == boost::asio::execution_context::fork_child)
{
if (epoll_fd_ != -1)
::close(epoll_fd_);
@@ -142,7 +145,7 @@ void epoll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
void epoll_reactor::init_task()
{
- io_service_.init_task();
+ scheduler_.init_task();
}
int epoll_reactor::register_descriptor(socket_type descriptor,
@@ -150,12 +153,18 @@ int epoll_reactor::register_descriptor(socket_type descriptor,
{
descriptor_data = allocate_descriptor_state();
+ BOOST_ASIO_HANDLER_REACTOR_REGISTRATION((
+ context(), static_cast<uintmax_t>(descriptor),
+ reinterpret_cast<uintmax_t>(descriptor_data)));
+
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
+ for (int i = 0; i < max_ops; ++i)
+ descriptor_data->try_speculative_[i] = true;
}
epoll_event ev = { 0, { 0 } };
@@ -186,6 +195,10 @@ int epoll_reactor::register_internal_descriptor(
{
descriptor_data = allocate_descriptor_state();
+ BOOST_ASIO_HANDLER_REACTOR_REGISTRATION((
+ context(), static_cast<uintmax_t>(descriptor),
+ reinterpret_cast<uintmax_t>(descriptor_data)));
+
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
@@ -193,6 +206,8 @@ int epoll_reactor::register_internal_descriptor(
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
+ for (int i = 0; i < max_ops; ++i)
+ descriptor_data->try_speculative_[i] = true;
}
epoll_event ev = { 0, { 0 } };
@@ -239,17 +254,23 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
{
- if (op->perform())
+ if (descriptor_data->try_speculative_[op_type])
{
- descriptor_lock.unlock();
- io_service_.post_immediate_completion(op, is_continuation);
- return;
+ if (reactor_op::status status = op->perform())
+ {
+ if (status == reactor_op::done_and_exhausted)
+ if (descriptor_data->registered_events_ != 0)
+ descriptor_data->try_speculative_[op_type] = false;
+ descriptor_lock.unlock();
+ scheduler_.post_immediate_completion(op, is_continuation);
+ return;
+ }
}
if (descriptor_data->registered_events_ == 0)
{
op->ec_ = boost::asio::error::operation_not_supported;
- io_service_.post_immediate_completion(op, is_continuation);
+ scheduler_.post_immediate_completion(op, is_continuation);
return;
}
@@ -268,7 +289,7 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
{
op->ec_ = boost::system::error_code(errno,
boost::asio::error::get_system_category());
- io_service_.post_immediate_completion(op, is_continuation);
+ scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
@@ -277,7 +298,7 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
else if (descriptor_data->registered_events_ == 0)
{
op->ec_ = boost::asio::error::operation_not_supported;
- io_service_.post_immediate_completion(op, is_continuation);
+ scheduler_.post_immediate_completion(op, is_continuation);
return;
}
else
@@ -295,7 +316,7 @@ void epoll_reactor::start_op(int op_type, socket_type descriptor,
}
descriptor_data->op_queue_[op_type].push(op);
- io_service_.work_started();
+ scheduler_.work_started();
}
void epoll_reactor::cancel_ops(socket_type,
@@ -319,7 +340,7 @@ void epoll_reactor::cancel_ops(socket_type,
descriptor_lock.unlock();
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
}
void epoll_reactor::deregister_descriptor(socket_type descriptor,
@@ -359,7 +380,11 @@ void epoll_reactor::deregister_descriptor(socket_type descriptor,
descriptor_lock.unlock();
- io_service_.post_deferred_completions(ops);
+ BOOST_ASIO_HANDLER_REACTOR_DEREGISTRATION((
+ context(), static_cast<uintmax_t>(descriptor),
+ reinterpret_cast<uintmax_t>(descriptor_data)));
+
+ scheduler_.post_deferred_completions(ops);
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
@@ -394,6 +419,10 @@ void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
descriptor_lock.unlock();
+ BOOST_ASIO_HANDLER_REACTOR_DEREGISTRATION((
+ context(), static_cast<uintmax_t>(descriptor),
+ reinterpret_cast<uintmax_t>(descriptor_data)));
+
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
@@ -415,28 +444,62 @@ void epoll_reactor::cleanup_descriptor_data(
}
}
-void epoll_reactor::run(bool block, op_queue<operation>& ops)
+void epoll_reactor::run(long usec, op_queue<operation>& ops)
{
- // This code relies on the fact that the task_io_service queues the reactor
- // task behind all descriptor operations generated by this function. This
- // means, that by the time we reach this point, any previously returned
- // descriptor operations have already been dequeued. Therefore it is now safe
- // for us to reuse and return them for the task_io_service to queue again.
+ // This code relies on the fact that the scheduler queues the reactor task
+ // behind all descriptor operations generated by this function. This means,
+ // that by the time we reach this point, any previously returned descriptor
+ // operations have already been dequeued. Therefore it is now safe for us to
+ // reuse and return them for the scheduler to queue again.
- // Calculate a timeout only if timerfd is not used.
+ // Calculate timeout. Check the timer queues only if timerfd is not in use.
int timeout;
- if (timer_fd_ != -1)
- timeout = block ? -1 : 0;
+ if (usec == 0)
+ timeout = 0;
else
{
- mutex::scoped_lock lock(mutex_);
- timeout = block ? get_timeout() : 0;
+ timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);
+ if (timer_fd_ == -1)
+ {
+ mutex::scoped_lock lock(mutex_);
+ timeout = get_timeout(timeout);
+ }
}
// Block on the epoll descriptor.
epoll_event events[128];
int num_events = epoll_wait(epoll_fd_, events, 128, timeout);
+#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+ // Trace the waiting events.
+ for (int i = 0; i < num_events; ++i)
+ {
+ void* ptr = events[i].data.ptr;
+ if (ptr == &interrupter_)
+ {
+ // Ignore.
+ }
+# if defined(BOOST_ASIO_HAS_TIMERFD)
+ else if (ptr == &timer_fd_)
+ {
+ // Ignore.
+ }
+# endif // defined(BOOST_ASIO_HAS_TIMERFD)
+ else
+ {
+ unsigned event_mask = 0;
+ if ((events[i].events & EPOLLIN) != 0)
+ event_mask |= BOOST_ASIO_HANDLER_REACTOR_READ_EVENT;
+ if ((events[i].events & EPOLLOUT))
+ event_mask |= BOOST_ASIO_HANDLER_REACTOR_WRITE_EVENT;
+ if ((events[i].events & (EPOLLERR | EPOLLHUP)) != 0)
+ event_mask |= BOOST_ASIO_HANDLER_REACTOR_ERROR_EVENT;
+ BOOST_ASIO_HANDLER_REACTOR_EVENTS((context(),
+ reinterpret_cast<uintmax_t>(ptr), event_mask));
+ }
+ }
+#endif // defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+
#if defined(BOOST_ASIO_HAS_TIMERFD)
bool check_timers = (timer_fd_ == -1);
#else // defined(BOOST_ASIO_HAS_TIMERFD)
@@ -470,7 +533,7 @@ void epoll_reactor::run(bool block, op_queue<operation>& ops)
else
{
// The descriptor operation doesn't count as work in and of itself, so we
- // don't call work_started() here. This still allows the io_service to
+ // don't call work_started() here. This still allows the scheduler to
// stop if the only remaining operations are descriptor operations.
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
if (!ops.is_enqueued(descriptor_data))
@@ -562,7 +625,8 @@ int epoll_reactor::do_timerfd_create()
epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
- return registered_descriptors_.alloc();
+ return registered_descriptors_.alloc(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
+ REACTOR_IO, scheduler_.concurrency_hint()));
}
void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)
@@ -598,11 +662,13 @@ void epoll_reactor::update_timeout()
interrupt();
}
-int epoll_reactor::get_timeout()
+int epoll_reactor::get_timeout(int msec)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
- return timer_queues_.wait_duration_msec(5 * 60 * 1000);
+ const int max_msec = 5 * 60 * 1000;
+ return timer_queues_.wait_duration_msec(
+ (msec < 0 || max_msec < msec) ? max_msec : msec);
}
#if defined(BOOST_ASIO_HAS_TIMERFD)
@@ -632,19 +698,18 @@ struct epoll_reactor::perform_io_cleanup_on_block_exit
{
// Post the remaining completed operations for invocation.
if (!ops_.empty())
- reactor_->io_service_.post_deferred_completions(ops_);
+ reactor_->scheduler_.post_deferred_completions(ops_);
// A user-initiated operation has completed, but there's no need to
// explicitly call work_finished() here. Instead, we'll take advantage of
- // the fact that the task_io_service will call work_finished() once we
- // return.
+ // the fact that the scheduler will call work_finished() once we return.
}
else
{
// No user-initiated operations have completed, so we need to compensate
- // for the work_finished() call that the task_io_service will make once
- // this operation returns.
- reactor_->io_service_.work_started();
+ // for the work_finished() call that the scheduler will make once this
+ // operation returns.
+ reactor_->scheduler_.compensating_work_started();
}
}
@@ -653,8 +718,9 @@ struct epoll_reactor::perform_io_cleanup_on_block_exit
operation* first_op_;
};
-epoll_reactor::descriptor_state::descriptor_state()
- : operation(&epoll_reactor::descriptor_state::do_complete)
+epoll_reactor::descriptor_state::descriptor_state(bool locking)
+ : operation(&epoll_reactor::descriptor_state::do_complete),
+ mutex_(locking)
{
}
@@ -671,12 +737,18 @@ operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
{
if (events & (flag[j] | EPOLLERR | EPOLLHUP))
{
+ try_speculative_[j] = true;
while (reactor_op* op = op_queue_[j].front())
{
- if (op->perform())
+ if (reactor_op::status status = op->perform())
{
op_queue_[j].pop();
io_cleanup.ops_.push(op);
+ if (status == reactor_op::done_and_exhausted)
+ {
+ try_speculative_[j] = false;
+ break;
+ }
}
else
break;
@@ -692,7 +764,7 @@ operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
}
void epoll_reactor::descriptor_state::do_complete(
- io_service_impl* owner, operation* base,
+ void* owner, operation* base,
const boost::system::error_code& ec, std::size_t bytes_transferred)
{
if (owner)
@@ -701,7 +773,7 @@ void epoll_reactor::descriptor_state::do_complete(
uint32_t events = static_cast<uint32_t>(bytes_transferred);
if (operation* op = descriptor_data->perform_io(events))
{
- op->complete(*owner, ec, 0);
+ op->complete(owner, ec, 0);
}
}
}
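
start_op() and perform_io() now consult a per-operation-type try_speculative_ flag: an operation is first attempted directly with a non-blocking syscall and only registered with epoll if it would block, and a done_and_exhausted result suppresses further speculation until the reactor reports the descriptor ready again. A sketch of the underlying idea, not Asio's internals verbatim (assumes POSIX sockets):

#include <sys/types.h>
#include <sys/socket.h>
#include <cerrno>

// Try the I/O immediately; fall back to reactor registration only when
// the operation would block. Returning true means "completed, skip epoll".
bool try_speculative_recv(int fd, char* buf, size_t len, ssize_t& n)
{
  n = ::recv(fd, buf, len, MSG_DONTWAIT); // non-blocking attempt
  if (n >= 0)
    return true;                          // data (or EOF) available now
  if (errno == EAGAIN || errno == EWOULDBLOCK)
    return false;                         // would block: wait via epoll
  return true;                            // hard error: complete with error
}
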
diff --git a/boost/asio/detail/impl/handler_tracking.ipp b/boost/asio/detail/impl/handler_tracking.ipp
index 259b5ab62b..aa6b9b2917 100644
--- a/boost/asio/detail/impl/handler_tracking.ipp
+++ b/boost/asio/detail/impl/handler_tracking.ipp
@@ -17,7 +17,11 @@
#include <boost/asio/detail/config.hpp>
-#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+#if defined(BOOST_ASIO_CUSTOM_HANDLER_TRACKING)
+
+// The handler tracking implementation is provided by the user-specified header.
+
+#elif defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
#include <cstdarg>
#include <cstdio>
@@ -25,17 +29,15 @@
#if defined(BOOST_ASIO_HAS_BOOST_DATE_TIME)
# include <boost/asio/time_traits.hpp>
-#else // defined(BOOST_ASIO_HAS_BOOST_DATE_TIME)
-# if defined(BOOST_ASIO_HAS_STD_CHRONO)
-# include <chrono>
-# elif defined(BOOST_ASIO_HAS_BOOST_CHRONO)
-# include <boost/chrono/system_clocks.hpp>
-# endif
+#elif defined(BOOST_ASIO_HAS_CHRONO)
+# include <boost/asio/detail/chrono.hpp>
# include <boost/asio/detail/chrono_time_traits.hpp>
# include <boost/asio/wait_traits.hpp>
#endif // defined(BOOST_ASIO_HAS_BOOST_DATE_TIME)
-#if !defined(BOOST_ASIO_WINDOWS)
+#if defined(BOOST_ASIO_WINDOWS_RUNTIME)
+# include <boost/asio/detail/socket_types.hpp>
+#elif !defined(BOOST_ASIO_WINDOWS)
# include <unistd.h>
#endif // !defined(BOOST_ASIO_WINDOWS)
@@ -56,16 +58,11 @@ struct handler_tracking_timestamp
boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
boost::posix_time::time_duration now =
boost::posix_time::microsec_clock::universal_time() - epoch;
-#elif defined(BOOST_ASIO_HAS_STD_CHRONO)
- typedef chrono_time_traits<std::chrono::system_clock,
- boost::asio::wait_traits<std::chrono::system_clock> > traits_helper;
- traits_helper::posix_time_duration now(
- std::chrono::system_clock::now().time_since_epoch());
-#elif defined(BOOST_ASIO_HAS_BOOST_CHRONO)
- typedef chrono_time_traits<boost::chrono::system_clock,
- boost::asio::wait_traits<boost::chrono::system_clock> > traits_helper;
+#elif defined(BOOST_ASIO_HAS_CHRONO)
+ typedef chrono_time_traits<chrono::system_clock,
+ boost::asio::wait_traits<chrono::system_clock> > traits_helper;
traits_helper::posix_time_duration now(
- boost::chrono::system_clock::now().time_since_epoch());
+ chrono::system_clock::now().time_since_epoch());
#endif
seconds = static_cast<uint64_t>(now.total_seconds());
microseconds = static_cast<uint64_t>(now.total_microseconds() % 1000000);
@@ -96,13 +93,15 @@ void handler_tracking::init()
state->current_completion_ = new tss_ptr<completion>;
}
-void handler_tracking::creation(handler_tracking::tracked_handler* h,
- const char* object_type, void* object, const char* op_name)
+void handler_tracking::creation(execution_context&,
+ handler_tracking::tracked_handler& h,
+ const char* object_type, void* object,
+ uintmax_t /*native_handle*/, const char* op_name)
{
static tracking_state* state = get_state();
static_mutex::scoped_lock lock(state->mutex_);
- h->id_ = state->next_id_++;
+ h.id_ = state->next_id_++;
lock.unlock();
handler_tracking_timestamp timestamp;
@@ -118,11 +117,12 @@ void handler_tracking::creation(handler_tracking::tracked_handler* h,
"@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n",
#endif // defined(BOOST_ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
- current_id, h->id_, object_type, object, op_name);
+ current_id, h.id_, object_type, object, op_name);
}
-handler_tracking::completion::completion(handler_tracking::tracked_handler* h)
- : id_(h->id_),
+handler_tracking::completion::completion(
+ const handler_tracking::tracked_handler& h)
+ : id_(h.id_),
invoked_(false),
next_(*get_state()->current_completion_)
{
@@ -250,8 +250,9 @@ void handler_tracking::completion::invocation_end()
}
}
-void handler_tracking::operation(const char* object_type,
- void* object, const char* op_name)
+void handler_tracking::operation(execution_context&,
+ const char* object_type, void* object,
+ uintmax_t /*native_handle*/, const char* op_name)
{
static tracking_state* state = get_state();
@@ -271,6 +272,54 @@ void handler_tracking::operation(const char* object_type,
current_id, object_type, object, op_name);
}
+void handler_tracking::reactor_registration(execution_context& /*context*/,
+ uintmax_t /*native_handle*/, uintmax_t /*registration*/)
+{
+}
+
+void handler_tracking::reactor_deregistration(execution_context& /*context*/,
+ uintmax_t /*native_handle*/, uintmax_t /*registration*/)
+{
+}
+
+void handler_tracking::reactor_events(execution_context& /*context*/,
+ uintmax_t /*native_handle*/, unsigned /*events*/)
+{
+}
+
+void handler_tracking::reactor_operation(
+ const tracked_handler& h, const char* op_name,
+ const boost::system::error_code& ec)
+{
+ handler_tracking_timestamp timestamp;
+
+ write_line(
+#if defined(BOOST_ASIO_WINDOWS)
+ "@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d\n",
+#else // defined(BOOST_ASIO_WINDOWS)
+ "@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d\n",
+#endif // defined(BOOST_ASIO_WINDOWS)
+ timestamp.seconds, timestamp.microseconds,
+ h.id_, op_name, ec.category().name(), ec.value());
+}
+
+void handler_tracking::reactor_operation(
+ const tracked_handler& h, const char* op_name,
+ const boost::system::error_code& ec, std::size_t bytes_transferred)
+{
+ handler_tracking_timestamp timestamp;
+
+ write_line(
+#if defined(BOOST_ASIO_WINDOWS)
+ "@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d,bytes_transferred=%I64u\n",
+#else // defined(BOOST_ASIO_WINDOWS)
+ "@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d,bytes_transferred=%llu\n",
+#endif // defined(BOOST_ASIO_WINDOWS)
+ timestamp.seconds, timestamp.microseconds,
+ h.id_, op_name, ec.category().name(), ec.value(),
+ static_cast<uint64_t>(bytes_transferred));
+}
+
void handler_tracking::write_line(const char* format, ...)
{
using namespace std; // For sprintf (or equivalent).
@@ -287,7 +336,11 @@ void handler_tracking::write_line(const char* format, ...)
va_end(args);
-#if defined(BOOST_ASIO_WINDOWS)
+#if defined(BOOST_ASIO_WINDOWS_RUNTIME)
+ wchar_t wline[256] = L"";
+ mbstowcs_s(0, wline, sizeof(wline) / sizeof(wchar_t), line, length);
+ ::OutputDebugStringW(wline);
+#elif defined(BOOST_ASIO_WINDOWS)
HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE);
DWORD bytes_written = 0;
::WriteFile(stderr_handle, line, length, &bytes_written, 0);
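
Handler tracking is enabled by compiling with -DBOOST_ASIO_ENABLE_HANDLER_TRACKING, or routed to a user-supplied implementation by defining BOOST_ASIO_CUSTOM_HANDLER_TRACKING to the name of a header, per the new branch above. Going by the format strings in the new reactor_operation() overloads, a traced reactor operation would appear on stderr roughly as follows (operation name and values illustrative, not taken from a real run):

@asio|1512057000.123456|.42|non_blocking_recv,ec=system:0,bytes_transferred=512
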
diff --git a/boost/asio/detail/impl/kqueue_reactor.hpp b/boost/asio/detail/impl/kqueue_reactor.hpp
index 93cbca26a2..83bfa95765 100644
--- a/boost/asio/detail/impl/kqueue_reactor.hpp
+++ b/boost/asio/detail/impl/kqueue_reactor.hpp
@@ -44,16 +44,16 @@ void kqueue_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
- boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
- io_service_.post_immediate_completion(op, false);
+ scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
- io_service_.work_started();
+ scheduler_.work_started();
if (earliest)
interrupt();
}
@@ -63,14 +63,27 @@ std::size_t kqueue_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
- boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
return n;
}
+template <typename Time_Traits>
+void kqueue_reactor::move_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& target,
+ typename timer_queue<Time_Traits>::per_timer_data& source)
+{
+ mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ queue.cancel_timer(target, ops);
+ queue.move_timer(target, source);
+ lock.unlock();
+ scheduler_.post_deferred_completions(ops);
+}
+
} // namespace detail
} // namespace asio
} // namespace boost
diff --git a/boost/asio/detail/impl/kqueue_reactor.ipp b/boost/asio/detail/impl/kqueue_reactor.ipp
index 8057606ce6..c492646a0b 100644
--- a/boost/asio/detail/impl/kqueue_reactor.ipp
+++ b/boost/asio/detail/impl/kqueue_reactor.ipp
@@ -21,6 +21,7 @@
#if defined(BOOST_ASIO_HAS_KQUEUE)
#include <boost/asio/detail/kqueue_reactor.hpp>
+#include <boost/asio/detail/scheduler.hpp>
#include <boost/asio/detail/throw_error.hpp>
#include <boost/asio/error.hpp>
@@ -39,13 +40,15 @@ namespace boost {
namespace asio {
namespace detail {
-kqueue_reactor::kqueue_reactor(boost::asio::io_service& io_service)
- : boost::asio::detail::service_base<kqueue_reactor>(io_service),
- io_service_(use_service<io_service_impl>(io_service)),
- mutex_(),
+kqueue_reactor::kqueue_reactor(boost::asio::execution_context& ctx)
+ : execution_context_service_base<kqueue_reactor>(ctx),
+ scheduler_(use_service<scheduler>(ctx)),
+ mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
+ REACTOR_REGISTRATION, scheduler_.concurrency_hint())),
kqueue_fd_(do_kqueue_create()),
interrupter_(),
- shutdown_(false)
+ shutdown_(false),
+ registered_descriptors_mutex_(mutex_.enabled())
{
struct kevent events[1];
BOOST_ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
@@ -63,7 +66,7 @@ kqueue_reactor::~kqueue_reactor()
close(kqueue_fd_);
}
-void kqueue_reactor::shutdown_service()
+void kqueue_reactor::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
@@ -81,12 +84,13 @@ void kqueue_reactor::shutdown_service()
timer_queues_.get_all_timers(ops);
- io_service_.abandon_operations(ops);
+ scheduler_.abandon_operations(ops);
}
-void kqueue_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+void kqueue_reactor::notify_fork(
+ boost::asio::execution_context::fork_event fork_ev)
{
- if (fork_ev == boost::asio::io_service::fork_child)
+ if (fork_ev == boost::asio::execution_context::fork_child)
{
// The kqueue descriptor is automatically closed in the child.
kqueue_fd_ = -1;
@@ -128,7 +132,7 @@ void kqueue_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
void kqueue_reactor::init_task()
{
- io_service_.init_task();
+ scheduler_.init_task();
}
int kqueue_reactor::register_descriptor(socket_type descriptor,
@@ -136,6 +140,10 @@ int kqueue_reactor::register_descriptor(socket_type descriptor,
{
descriptor_data = allocate_descriptor_state();
+ BOOST_ASIO_HANDLER_REACTOR_REGISTRATION((
+ context(), static_cast<uintmax_t>(descriptor),
+ reinterpret_cast<uintmax_t>(descriptor_data)));
+
mutex::scoped_lock lock(descriptor_data->mutex_);
descriptor_data->descriptor_ = descriptor;
@@ -151,6 +159,10 @@ int kqueue_reactor::register_internal_descriptor(
{
descriptor_data = allocate_descriptor_state();
+ BOOST_ASIO_HANDLER_REACTOR_REGISTRATION((
+ context(), static_cast<uintmax_t>(descriptor),
+ reinterpret_cast<uintmax_t>(descriptor_data)));
+
mutex::scoped_lock lock(descriptor_data->mutex_);
descriptor_data->descriptor_ = descriptor;
@@ -205,7 +217,7 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
if (op->perform())
{
descriptor_lock.unlock();
- io_service_.post_immediate_completion(op, is_continuation);
+ scheduler_.post_immediate_completion(op, is_continuation);
return;
}
@@ -224,7 +236,7 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
{
op->ec_ = boost::system::error_code(errno,
boost::asio::error::get_system_category());
- io_service_.post_immediate_completion(op, is_continuation);
+ scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
@@ -244,7 +256,7 @@ void kqueue_reactor::start_op(int op_type, socket_type descriptor,
}
descriptor_data->op_queue_[op_type].push(op);
- io_service_.work_started();
+ scheduler_.work_started();
}
void kqueue_reactor::cancel_ops(socket_type,
@@ -268,7 +280,7 @@ void kqueue_reactor::cancel_ops(socket_type,
descriptor_lock.unlock();
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
}
void kqueue_reactor::deregister_descriptor(socket_type descriptor,
@@ -312,7 +324,11 @@ void kqueue_reactor::deregister_descriptor(socket_type descriptor,
descriptor_lock.unlock();
- io_service_.post_deferred_completions(ops);
+ BOOST_ASIO_HANDLER_REACTOR_DEREGISTRATION((
+ context(), static_cast<uintmax_t>(descriptor),
+ reinterpret_cast<uintmax_t>(descriptor_data)));
+
+ scheduler_.post_deferred_completions(ops);
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
@@ -351,6 +367,10 @@ void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
descriptor_lock.unlock();
+ BOOST_ASIO_HANDLER_REACTOR_DEREGISTRATION((
+ context(), static_cast<uintmax_t>(descriptor),
+ reinterpret_cast<uintmax_t>(descriptor_data)));
+
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
@@ -372,13 +392,13 @@ void kqueue_reactor::cleanup_descriptor_data(
}
}
-void kqueue_reactor::run(bool block, op_queue<operation>& ops)
+void kqueue_reactor::run(long usec, op_queue<operation>& ops)
{
mutex::scoped_lock lock(mutex_);
// Determine how long to block while waiting for events.
timespec timeout_buf = { 0, 0 };
- timespec* timeout = block ? get_timeout(timeout_buf) : &timeout_buf;
+ timespec* timeout = usec ? get_timeout(usec, timeout_buf) : &timeout_buf;
lock.unlock();
@@ -386,6 +406,31 @@ void kqueue_reactor::run(bool block, op_queue<operation>& ops)
struct kevent events[128];
int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout);
+#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+ // Trace the waiting events.
+ for (int i = 0; i < num_events; ++i)
+ {
+ void* ptr = reinterpret_cast<void*>(events[i].udata);
+ if (ptr != &interrupter_)
+ {
+ unsigned event_mask = 0;
+ switch (events[i].filter)
+ {
+ case EVFILT_READ:
+ event_mask |= BOOST_ASIO_HANDLER_REACTOR_READ_EVENT;
+ break;
+ case EVFILT_WRITE:
+ event_mask |= BOOST_ASIO_HANDLER_REACTOR_WRITE_EVENT;
+ break;
+ }
+ if ((events[i].flags & (EV_ERROR | EV_OOBAND)) != 0)
+ event_mask |= BOOST_ASIO_HANDLER_REACTOR_ERROR_EVENT;
+ BOOST_ASIO_HANDLER_REACTOR_EVENTS((context(),
+ reinterpret_cast<uintmax_t>(ptr), event_mask));
+ }
+ }
+#endif // defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
@@ -476,7 +521,8 @@ int kqueue_reactor::do_kqueue_create()
kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
- return registered_descriptors_.alloc();
+ return registered_descriptors_.alloc(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
+ REACTOR_IO, scheduler_.concurrency_hint()));
}
void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)
@@ -497,11 +543,13 @@ void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue)
timer_queues_.erase(&queue);
}
-timespec* kqueue_reactor::get_timeout(timespec& ts)
+timespec* kqueue_reactor::get_timeout(long usec, timespec& ts)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
- long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
+ const long max_usec = 5 * 60 * 1000 * 1000;
+ usec = timer_queues_.wait_duration_usec(
+ (usec < 0 || max_usec < usec) ? max_usec : usec);
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
return &ts;
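
get_timeout() now clamps the caller's timeout to at most five minutes before converting it to a timespec for kevent(), so system clock changes are still noticed on an otherwise idle reactor. The same arithmetic in isolation (hypothetical helper name):

#include <time.h>

// Clamp to 5 minutes, then split microseconds into a timespec, as
// kqueue_reactor::get_timeout() does above.
timespec usec_to_timespec_clamped(long usec)
{
  const long max_usec = 5 * 60 * 1000 * 1000;
  if (usec < 0 || max_usec < usec)
    usec = max_usec;
  timespec ts;
  ts.tv_sec = usec / 1000000;
  ts.tv_nsec = (usec % 1000000) * 1000;
  return ts;
}
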
diff --git a/boost/asio/detail/impl/null_event.ipp b/boost/asio/detail/impl/null_event.ipp
new file mode 100644
index 0000000000..fb9f2e2ed2
--- /dev/null
+++ b/boost/asio/detail/impl/null_event.ipp
@@ -0,0 +1,76 @@
+//
+// detail/impl/null_event.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_NULL_EVENT_IPP
+#define BOOST_ASIO_DETAIL_IMPL_NULL_EVENT_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_WINDOWS_RUNTIME)
+# include <thread>
+#elif defined(BOOST_ASIO_WINDOWS) || defined(__CYGWIN__)
+# include <boost/asio/detail/socket_types.hpp>
+#else
+# include <unistd.h>
+# if defined(__hpux)
+# include <sys/time.h>
+# endif
+# if !defined(__hpux) || defined(__SELECT)
+# include <sys/select.h>
+# endif
+#endif
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+void null_event::do_wait()
+{
+#if defined(BOOST_ASIO_WINDOWS_RUNTIME)
+ std::this_thread::sleep_until((std::chrono::steady_clock::time_point::max)());
+#elif defined(BOOST_ASIO_WINDOWS) || defined(__CYGWIN__)
+ ::Sleep(INFINITE);
+#else
+ ::pause();
+#endif
+}
+
+void null_event::do_wait_for_usec(long usec)
+{
+#if defined(BOOST_ASIO_WINDOWS_RUNTIME)
+ std::this_thread::sleep_for(std::chrono::microseconds(usec));
+#elif defined(BOOST_ASIO_WINDOWS) || defined(__CYGWIN__)
+ ::Sleep(usec / 1000);
+#elif defined(__hpux) && defined(__SELECT)
+ timespec ts;
+ ts.tv_sec = usec / 1000000;
+ ts.tv_nsec = (usec % 1000000) * 1000;
+ ::pselect(0, 0, 0, 0, &ts, 0);
+#else
+ timeval tv;
+ tv.tv_sec = usec / 1000000;
+ tv.tv_usec = usec % 1000000;
+ ::select(0, 0, 0, 0, &tv);
+#endif
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_NULL_EVENT_IPP
diff --git a/boost/asio/detail/impl/posix_event.ipp b/boost/asio/detail/impl/posix_event.ipp
index 3b465ca3cd..4ff246f301 100644
--- a/boost/asio/detail/impl/posix_event.ipp
+++ b/boost/asio/detail/impl/posix_event.ipp
@@ -32,7 +32,19 @@ namespace detail {
posix_event::posix_event()
: state_(0)
{
+#if (defined(__MACH__) && defined(__APPLE__)) \
+ || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
int error = ::pthread_cond_init(&cond_, 0);
+#else // (defined(__MACH__) && defined(__APPLE__))
+ // || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
+ ::pthread_condattr_t attr;
+ ::pthread_condattr_init(&attr);
+ int error = ::pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+ if (error == 0)
+ error = ::pthread_cond_init(&cond_, &attr);
+#endif // (defined(__MACH__) && defined(__APPLE__))
+ // || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
+
boost::system::error_code ec(error,
boost::asio::error::get_system_category());
boost::asio::detail::throw_error(ec, "event");
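
The event's condition variable now times out against CLOCK_MONOTONIC where pthread_condattr_setclock() is available (macOS and Android before API 21 lack it, hence the fallback branch), so wall-clock adjustments cannot stretch or shrink waits. A standalone sketch of a timed wait on such a condition variable, assuming plain POSIX:

#include <pthread.h>
#include <time.h>

// Wait on a condition variable whose condattr was set to CLOCK_MONOTONIC;
// the deadline must be computed against that same clock.
int wait_for_ms(pthread_cond_t* cond, pthread_mutex_t* mtx, long ms)
{
  timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  ts.tv_sec += ms / 1000;
  ts.tv_nsec += (ms % 1000) * 1000000;
  if (ts.tv_nsec >= 1000000000)
  {
    ++ts.tv_sec;
    ts.tv_nsec -= 1000000000;
  }
  return pthread_cond_timedwait(cond, mtx, &ts); // 0, or ETIMEDOUT
}
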
diff --git a/boost/asio/detail/impl/posix_thread.ipp b/boost/asio/detail/impl/posix_thread.ipp
index 66cb18da33..66a6792a4a 100644
--- a/boost/asio/detail/impl/posix_thread.ipp
+++ b/boost/asio/detail/impl/posix_thread.ipp
@@ -44,6 +44,16 @@ void posix_thread::join()
}
}
+std::size_t posix_thread::hardware_concurrency()
+{
+#if defined(_SC_NPROCESSORS_ONLN)
+ long result = sysconf(_SC_NPROCESSORS_ONLN);
+ if (result > 0)
+ return result;
+#endif // defined(_SC_NPROCESSORS_ONLN)
+ return 0;
+}
+
void posix_thread::start_thread(func_base* arg)
{
int error = ::pthread_create(&thread_, 0,
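
The new posix_thread::hardware_concurrency() reports the number of online processors and returns 0 when the platform cannot say, the same convention as std::thread::hardware_concurrency(). The underlying query, standalone:

#include <unistd.h>
#include <cstdio>

int main()
{
  long n = sysconf(_SC_NPROCESSORS_ONLN); // -1 if the query is unsupported
  std::printf("online CPUs: %ld\n", n > 0 ? n : 0);
}
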
diff --git a/boost/asio/detail/impl/reactive_descriptor_service.ipp b/boost/asio/detail/impl/reactive_descriptor_service.ipp
index a0300c47d0..ffb623839f 100644
--- a/boost/asio/detail/impl/reactive_descriptor_service.ipp
+++ b/boost/asio/detail/impl/reactive_descriptor_service.ipp
@@ -31,13 +31,14 @@ namespace asio {
namespace detail {
reactive_descriptor_service::reactive_descriptor_service(
- boost::asio::io_service& io_service)
- : reactor_(boost::asio::use_service<reactor>(io_service))
+ boost::asio::io_context& io_context)
+ : service_base<reactive_descriptor_service>(io_context),
+ reactor_(boost::asio::use_service<reactor>(io_context))
{
reactor_.init_task();
}
-void reactive_descriptor_service::shutdown_service()
+void reactive_descriptor_service::shutdown()
{
}
@@ -84,7 +85,8 @@ void reactive_descriptor_service::destroy(
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((reactor_.context(),
+ "descriptor", &impl, impl.descriptor_, "close"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
(impl.state_ & descriptor_ops::possible_dup) == 0);
@@ -126,7 +128,8 @@ boost::system::error_code reactive_descriptor_service::close(
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((reactor_.context(),
+ "descriptor", &impl, impl.descriptor_, "close"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
(impl.state_ & descriptor_ops::possible_dup) == 0);
@@ -159,7 +162,8 @@ reactive_descriptor_service::release(
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "release"));
+ BOOST_ASIO_HANDLER_OPERATION((reactor_.context(),
+ "descriptor", &impl, impl.descriptor_, "release"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
@@ -179,7 +183,8 @@ boost::system::error_code reactive_descriptor_service::cancel(
return ec;
}
- BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "cancel"));
+ BOOST_ASIO_HANDLER_OPERATION((reactor_.context(),
+ "descriptor", &impl, impl.descriptor_, "cancel"));
reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_);
ec = boost::system::error_code();
diff --git a/boost/asio/detail/impl/reactive_serial_port_service.ipp b/boost/asio/detail/impl/reactive_serial_port_service.ipp
index ffa8857ab6..ffd2a2b558 100644
--- a/boost/asio/detail/impl/reactive_serial_port_service.ipp
+++ b/boost/asio/detail/impl/reactive_serial_port_service.ipp
@@ -31,14 +31,15 @@ namespace asio {
namespace detail {
reactive_serial_port_service::reactive_serial_port_service(
- boost::asio::io_service& io_service)
- : descriptor_service_(io_service)
+ boost::asio::io_context& io_context)
+ : service_base<reactive_serial_port_service>(io_context),
+ descriptor_service_(io_context)
{
}
-void reactive_serial_port_service::shutdown_service()
+void reactive_serial_port_service::shutdown()
{
- descriptor_service_.shutdown_service();
+ descriptor_service_.shutdown();
}
boost::system::error_code reactive_serial_port_service::open(
@@ -73,7 +74,7 @@ boost::system::error_code reactive_serial_port_service::open(
s = descriptor_ops::error_wrapper(::tcgetattr(fd, &ios), ec);
if (s >= 0)
{
-#if defined(_BSD_SOURCE)
+#if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)
::cfmakeraw(&ios);
#else
ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK
diff --git a/boost/asio/detail/impl/reactive_socket_service_base.ipp b/boost/asio/detail/impl/reactive_socket_service_base.ipp
index 3594ae0528..23dbbb583c 100644
--- a/boost/asio/detail/impl/reactive_socket_service_base.ipp
+++ b/boost/asio/detail/impl/reactive_socket_service_base.ipp
@@ -29,13 +29,14 @@ namespace asio {
namespace detail {
reactive_socket_service_base::reactive_socket_service_base(
- boost::asio::io_service& io_service)
- : reactor_(use_service<reactor>(io_service))
+ boost::asio::io_context& io_context)
+ : io_context_(io_context),
+ reactor_(use_service<reactor>(io_context))
{
reactor_.init_task();
}
-void reactive_socket_service_base::shutdown_service()
+void reactive_socket_service_base::base_shutdown()
{
}
@@ -82,7 +83,8 @@ void reactive_socket_service_base::destroy(
{
if (impl.socket_ != invalid_socket)
{
- BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((reactor_.context(),
+ "socket", &impl, impl.socket_, "close"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
(impl.state_ & socket_ops::possible_dup) == 0);
@@ -100,7 +102,8 @@ boost::system::error_code reactive_socket_service_base::close(
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((reactor_.context(),
+ "socket", &impl, impl.socket_, "close"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
(impl.state_ & socket_ops::possible_dup) == 0);
@@ -127,6 +130,27 @@ boost::system::error_code reactive_socket_service_base::close(
return ec;
}
+socket_type reactive_socket_service_base::release(
+ reactive_socket_service_base::base_implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return invalid_socket;
+ }
+
+ BOOST_ASIO_HANDLER_OPERATION((reactor_.context(),
+ "socket", &impl, impl.socket_, "release"));
+
+ reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, false);
+ reactor_.cleanup_descriptor_data(impl.reactor_data_);
+ socket_type sock = impl.socket_;
+ construct(impl);
+ ec = boost::system::error_code();
+ return sock;
+}
+
boost::system::error_code reactive_socket_service_base::cancel(
reactive_socket_service_base::base_implementation_type& impl,
boost::system::error_code& ec)
@@ -137,7 +161,8 @@ boost::system::error_code reactive_socket_service_base::cancel(
return ec;
}
- BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "cancel"));
+ BOOST_ASIO_HANDLER_OPERATION((reactor_.context(),
+ "socket", &impl, impl.socket_, "cancel"));
reactor_.cancel_ops(impl.socket_, impl.reactor_data_);
ec = boost::system::error_code();
diff --git a/boost/asio/detail/impl/resolver_service_base.ipp b/boost/asio/detail/impl/resolver_service_base.ipp
index 4ef66cde15..1dbbd20c3f 100644
--- a/boost/asio/detail/impl/resolver_service_base.ipp
+++ b/boost/asio/detail/impl/resolver_service_base.ipp
@@ -24,62 +24,62 @@ namespace boost {
namespace asio {
namespace detail {
-class resolver_service_base::work_io_service_runner
+class resolver_service_base::work_io_context_runner
{
public:
- work_io_service_runner(boost::asio::io_service& io_service)
- : io_service_(io_service) {}
- void operator()() { io_service_.run(); }
+ work_io_context_runner(boost::asio::io_context& io_context)
+ : io_context_(io_context) {}
+ void operator()() { io_context_.run(); }
private:
- boost::asio::io_service& io_service_;
+ boost::asio::io_context& io_context_;
};
resolver_service_base::resolver_service_base(
- boost::asio::io_service& io_service)
- : io_service_impl_(boost::asio::use_service<io_service_impl>(io_service)),
- work_io_service_(new boost::asio::io_service),
- work_io_service_impl_(boost::asio::use_service<
- io_service_impl>(*work_io_service_)),
- work_(new boost::asio::io_service::work(*work_io_service_)),
+ boost::asio::io_context& io_context)
+ : io_context_impl_(boost::asio::use_service<io_context_impl>(io_context)),
+ work_io_context_(new boost::asio::io_context(-1)),
+ work_io_context_impl_(boost::asio::use_service<
+ io_context_impl>(*work_io_context_)),
+ work_(boost::asio::make_work_guard(*work_io_context_)),
work_thread_(0)
{
}
resolver_service_base::~resolver_service_base()
{
- shutdown_service();
+ base_shutdown();
}
-void resolver_service_base::shutdown_service()
+void resolver_service_base::base_shutdown()
{
work_.reset();
- if (work_io_service_.get())
+ if (work_io_context_.get())
{
- work_io_service_->stop();
+ work_io_context_->stop();
if (work_thread_.get())
{
work_thread_->join();
work_thread_.reset();
}
- work_io_service_.reset();
+ work_io_context_.reset();
}
}
-void resolver_service_base::fork_service(
- boost::asio::io_service::fork_event fork_ev)
+void resolver_service_base::base_notify_fork(
+ boost::asio::io_context::fork_event fork_ev)
{
if (work_thread_.get())
{
- if (fork_ev == boost::asio::io_service::fork_prepare)
+ if (fork_ev == boost::asio::io_context::fork_prepare)
{
- work_io_service_->stop();
+ work_io_context_->stop();
work_thread_->join();
}
else
{
- work_io_service_->reset();
+ work_io_context_->restart();
work_thread_.reset(new boost::asio::detail::thread(
- work_io_service_runner(*work_io_service_)));
+ work_io_context_runner(*work_io_context_)));
}
}
}
@@ -93,24 +93,48 @@ void resolver_service_base::construct(
void resolver_service_base::destroy(
resolver_service_base::implementation_type& impl)
{
- BOOST_ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel"));
+ BOOST_ASIO_HANDLER_OPERATION((io_context_impl_.context(),
+ "resolver", &impl, 0, "cancel"));
impl.reset();
}
+void resolver_service_base::move_construct(implementation_type& impl,
+ implementation_type& other_impl)
+{
+ impl = BOOST_ASIO_MOVE_CAST(implementation_type)(other_impl);
+}
+
+void resolver_service_base::move_assign(implementation_type& impl,
+ resolver_service_base&, implementation_type& other_impl)
+{
+ destroy(impl);
+ impl = BOOST_ASIO_MOVE_CAST(implementation_type)(other_impl);
+}
+
void resolver_service_base::cancel(
resolver_service_base::implementation_type& impl)
{
- BOOST_ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel"));
+ BOOST_ASIO_HANDLER_OPERATION((io_context_impl_.context(),
+ "resolver", &impl, 0, "cancel"));
impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
}
-void resolver_service_base::start_resolve_op(operation* op)
+void resolver_service_base::start_resolve_op(resolve_op* op)
{
- start_work_thread();
- io_service_impl_.work_started();
- work_io_service_impl_.post_immediate_completion(op, false);
+ if (BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
+ io_context_impl_.concurrency_hint()))
+ {
+ start_work_thread();
+ io_context_impl_.work_started();
+ work_io_context_impl_.post_immediate_completion(op, false);
+ }
+ else
+ {
+ op->ec_ = boost::asio::error::operation_not_supported;
+ io_context_impl_.post_immediate_completion(op, false);
+ }
}
void resolver_service_base::start_work_thread()
@@ -119,7 +143,7 @@ void resolver_service_base::start_work_thread()
if (!work_thread_.get())
{
work_thread_.reset(new boost::asio::detail::thread(
- work_io_service_runner(*work_io_service_)));
+ work_io_context_runner(*work_io_context_)));
}
}
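
The resolver service runs blocking name resolution on a private io_context drained by a single background thread, keeping getaddrinfo() off the caller's context; the work guard stops that context's run() from returning while it is idle, and the fork handler stops and relaunches the thread around a fork. The new concurrency-hint branch fails async resolves with operation_not_supported when the scheduler was configured without locking. The pattern in miniature (a sketch, not the service's exact code):

#include <boost/asio.hpp>
#include <thread>

int main()
{
  boost::asio::io_context work_ioc(1); // private context for blocking jobs
  auto guard = boost::asio::make_work_guard(work_ioc);
  std::thread worker([&work_ioc]{ work_ioc.run(); });

  boost::asio::post(work_ioc, []{ /* blocking work, e.g. getaddrinfo() */ });

  guard.reset();  // let run() return once queued work completes
  worker.join();
}
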
diff --git a/boost/asio/detail/impl/task_io_service.ipp b/boost/asio/detail/impl/scheduler.ipp
index bc83fd4757..7af3907016 100644
--- a/boost/asio/detail/impl/task_io_service.ipp
+++ b/boost/asio/detail/impl/scheduler.ipp
@@ -1,6 +1,6 @@
//
-// detail/impl/task_io_service.ipp
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// detail/impl/scheduler.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
@@ -8,8 +8,8 @@
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
-#ifndef BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
-#define BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
+#ifndef BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
+#define BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
@@ -17,13 +17,12 @@
#include <boost/asio/detail/config.hpp>
-#if !defined(BOOST_ASIO_HAS_IOCP)
-
+#include <boost/asio/detail/concurrency_hint.hpp>
#include <boost/asio/detail/event.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/reactor.hpp>
-#include <boost/asio/detail/task_io_service.hpp>
-#include <boost/asio/detail/task_io_service_thread_info.hpp>
+#include <boost/asio/detail/scheduler.hpp>
+#include <boost/asio/detail/scheduler_thread_info.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -31,14 +30,14 @@ namespace boost {
namespace asio {
namespace detail {
-struct task_io_service::task_cleanup
+struct scheduler::task_cleanup
{
~task_cleanup()
{
if (this_thread_->private_outstanding_work > 0)
{
boost::asio::detail::increment(
- task_io_service_->outstanding_work_,
+ scheduler_->outstanding_work_,
this_thread_->private_outstanding_work);
}
this_thread_->private_outstanding_work = 0;
@@ -46,29 +45,29 @@ struct task_io_service::task_cleanup
// Enqueue the completed operations and reinsert the task at the end of
// the operation queue.
lock_->lock();
- task_io_service_->task_interrupted_ = true;
- task_io_service_->op_queue_.push(this_thread_->private_op_queue);
- task_io_service_->op_queue_.push(&task_io_service_->task_operation_);
+ scheduler_->task_interrupted_ = true;
+ scheduler_->op_queue_.push(this_thread_->private_op_queue);
+ scheduler_->op_queue_.push(&scheduler_->task_operation_);
}
- task_io_service* task_io_service_;
+ scheduler* scheduler_;
mutex::scoped_lock* lock_;
thread_info* this_thread_;
};
-struct task_io_service::work_cleanup
+struct scheduler::work_cleanup
{
~work_cleanup()
{
if (this_thread_->private_outstanding_work > 1)
{
boost::asio::detail::increment(
- task_io_service_->outstanding_work_,
+ scheduler_->outstanding_work_,
this_thread_->private_outstanding_work - 1);
}
else if (this_thread_->private_outstanding_work < 1)
{
- task_io_service_->work_finished();
+ scheduler_->work_finished();
}
this_thread_->private_outstanding_work = 0;
@@ -76,31 +75,37 @@ struct task_io_service::work_cleanup
if (!this_thread_->private_op_queue.empty())
{
lock_->lock();
- task_io_service_->op_queue_.push(this_thread_->private_op_queue);
+ scheduler_->op_queue_.push(this_thread_->private_op_queue);
}
#endif // defined(BOOST_ASIO_HAS_THREADS)
}
- task_io_service* task_io_service_;
+ scheduler* scheduler_;
mutex::scoped_lock* lock_;
thread_info* this_thread_;
};
-task_io_service::task_io_service(
- boost::asio::io_service& io_service, std::size_t concurrency_hint)
- : boost::asio::detail::service_base<task_io_service>(io_service),
- one_thread_(concurrency_hint == 1),
- mutex_(),
+scheduler::scheduler(
+ boost::asio::execution_context& ctx, int concurrency_hint)
+ : boost::asio::detail::execution_context_service_base<scheduler>(ctx),
+ one_thread_(concurrency_hint == 1
+ || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
+ SCHEDULER, concurrency_hint)
+ || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
+ REACTOR_IO, concurrency_hint)),
+ mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
+ SCHEDULER, concurrency_hint)),
task_(0),
task_interrupted_(true),
outstanding_work_(0),
stopped_(false),
- shutdown_(false)
+ shutdown_(false),
+ concurrency_hint_(concurrency_hint)
{
BOOST_ASIO_HANDLER_TRACKING_INIT;
}
-void task_io_service::shutdown_service()
+void scheduler::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
@@ -119,18 +124,18 @@ void task_io_service::shutdown_service()
task_ = 0;
}
-void task_io_service::init_task()
+void scheduler::init_task()
{
mutex::scoped_lock lock(mutex_);
if (!shutdown_ && !task_)
{
- task_ = &use_service<reactor>(this->get_io_service());
+ task_ = &use_service<reactor>(this->context());
op_queue_.push(&task_operation_);
wake_one_thread_and_unlock(lock);
}
}
-std::size_t task_io_service::run(boost::system::error_code& ec)
+std::size_t scheduler::run(boost::system::error_code& ec)
{
ec = boost::system::error_code();
if (outstanding_work_ == 0)
@@ -152,7 +157,7 @@ std::size_t task_io_service::run(boost::system::error_code& ec)
return n;
}
-std::size_t task_io_service::run_one(boost::system::error_code& ec)
+std::size_t scheduler::run_one(boost::system::error_code& ec)
{
ec = boost::system::error_code();
if (outstanding_work_ == 0)
@@ -170,7 +175,25 @@ std::size_t task_io_service::run_one(boost::system::error_code& ec)
return do_run_one(lock, this_thread, ec);
}
-std::size_t task_io_service::poll(boost::system::error_code& ec)
+std::size_t scheduler::wait_one(long usec, boost::system::error_code& ec)
+{
+ ec = boost::system::error_code();
+ if (outstanding_work_ == 0)
+ {
+ stop();
+ return 0;
+ }
+
+ thread_info this_thread;
+ this_thread.private_outstanding_work = 0;
+ thread_call_stack::context ctx(this, this_thread);
+
+ mutex::scoped_lock lock(mutex_);
+
+ return do_wait_one(lock, this_thread, usec, ec);
+}
+
+std::size_t scheduler::poll(boost::system::error_code& ec)
{
ec = boost::system::error_code();
if (outstanding_work_ == 0)
@@ -190,8 +213,8 @@ std::size_t task_io_service::poll(boost::system::error_code& ec)
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
- if (thread_info* outer_thread_info = ctx.next_by_key())
- op_queue_.push(outer_thread_info->private_op_queue);
+ if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
+ op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)
std::size_t n = 0;
@@ -201,7 +224,7 @@ std::size_t task_io_service::poll(boost::system::error_code& ec)
return n;
}
-std::size_t task_io_service::poll_one(boost::system::error_code& ec)
+std::size_t scheduler::poll_one(boost::system::error_code& ec)
{
ec = boost::system::error_code();
if (outstanding_work_ == 0)
@@ -221,41 +244,47 @@ std::size_t task_io_service::poll_one(boost::system::error_code& ec)
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
- if (thread_info* outer_thread_info = ctx.next_by_key())
- op_queue_.push(outer_thread_info->private_op_queue);
+ if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
+ op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)
return do_poll_one(lock, this_thread, ec);
}
-void task_io_service::stop()
+void scheduler::stop()
{
mutex::scoped_lock lock(mutex_);
stop_all_threads(lock);
}
-bool task_io_service::stopped() const
+bool scheduler::stopped() const
{
mutex::scoped_lock lock(mutex_);
return stopped_;
}
-void task_io_service::reset()
+void scheduler::restart()
{
mutex::scoped_lock lock(mutex_);
stopped_ = false;
}
-void task_io_service::post_immediate_completion(
- task_io_service::operation* op, bool is_continuation)
+void scheduler::compensating_work_started()
+{
+ thread_info_base* this_thread = thread_call_stack::contains(this);
+ ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
+}
+
+void scheduler::post_immediate_completion(
+ scheduler::operation* op, bool is_continuation)
{
#if defined(BOOST_ASIO_HAS_THREADS)
if (one_thread_ || is_continuation)
{
- if (thread_info* this_thread = thread_call_stack::contains(this))
+ if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
- ++this_thread->private_outstanding_work;
- this_thread->private_op_queue.push(op);
+ ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
+ static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
return;
}
}
@@ -269,14 +298,14 @@ void task_io_service::post_immediate_completion(
wake_one_thread_and_unlock(lock);
}
-void task_io_service::post_deferred_completion(task_io_service::operation* op)
+void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(BOOST_ASIO_HAS_THREADS)
if (one_thread_)
{
- if (thread_info* this_thread = thread_call_stack::contains(this))
+ if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
- this_thread->private_op_queue.push(op);
+ static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
return;
}
}
@@ -287,17 +316,17 @@ void task_io_service::post_deferred_completion(task_io_service::operation* op)
wake_one_thread_and_unlock(lock);
}
-void task_io_service::post_deferred_completions(
- op_queue<task_io_service::operation>& ops)
+void scheduler::post_deferred_completions(
+ op_queue<scheduler::operation>& ops)
{
if (!ops.empty())
{
#if defined(BOOST_ASIO_HAS_THREADS)
if (one_thread_)
{
- if (thread_info* this_thread = thread_call_stack::contains(this))
+ if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
- this_thread->private_op_queue.push(ops);
+ static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
return;
}
}
@@ -309,8 +338,8 @@ void task_io_service::post_deferred_completions(
}
}
-void task_io_service::do_dispatch(
- task_io_service::operation* op)
+void scheduler::do_dispatch(
+ scheduler::operation* op)
{
work_started();
mutex::scoped_lock lock(mutex_);
@@ -318,15 +347,15 @@ void task_io_service::do_dispatch(
wake_one_thread_and_unlock(lock);
}
-void task_io_service::abandon_operations(
- op_queue<task_io_service::operation>& ops)
+void scheduler::abandon_operations(
+ op_queue<scheduler::operation>& ops)
{
- op_queue<task_io_service::operation> ops2;
+ op_queue<scheduler::operation> ops2;
ops2.push(ops);
}
-std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
- task_io_service::thread_info& this_thread,
+std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
+ scheduler::thread_info& this_thread,
const boost::system::error_code& ec)
{
while (!stopped_)
@@ -353,7 +382,7 @@ std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
- task_->run(!more_handlers, this_thread.private_op_queue);
+ task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
}
else
{
@@ -369,7 +398,7 @@ std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
- o->complete(*this, ec, task_result);
+ o->complete(this, ec, task_result);
return 1;
}
@@ -384,8 +413,78 @@ std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
return 0;
}
-std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock,
- task_io_service::thread_info& this_thread,
+std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
+ scheduler::thread_info& this_thread, long usec,
+ const boost::system::error_code& ec)
+{
+ if (stopped_)
+ return 0;
+
+ operation* o = op_queue_.front();
+ if (o == 0)
+ {
+ wakeup_event_.clear(lock);
+ wakeup_event_.wait_for_usec(lock, usec);
+ usec = 0; // Wait at most once.
+ o = op_queue_.front();
+ }
+
+ if (o == &task_operation_)
+ {
+ op_queue_.pop();
+ bool more_handlers = (!op_queue_.empty());
+
+ task_interrupted_ = more_handlers;
+
+ if (more_handlers && !one_thread_)
+ wakeup_event_.unlock_and_signal_one(lock);
+ else
+ lock.unlock();
+
+ {
+ task_cleanup on_exit = { this, &lock, &this_thread };
+ (void)on_exit;
+
+ // Run the task. May throw an exception. Only block if the operation
+ // queue is empty and we're not polling, otherwise we want to return
+ // as soon as possible.
+ task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
+ }
+
+ o = op_queue_.front();
+ if (o == &task_operation_)
+ {
+ if (!one_thread_)
+ wakeup_event_.maybe_unlock_and_signal_one(lock);
+ return 0;
+ }
+ }
+
+ if (o == 0)
+ return 0;
+
+ op_queue_.pop();
+ bool more_handlers = (!op_queue_.empty());
+
+ std::size_t task_result = o->task_result_;
+
+ if (more_handlers && !one_thread_)
+ wake_one_thread_and_unlock(lock);
+ else
+ lock.unlock();
+
+ // Ensure the count of outstanding work is decremented on block exit.
+ work_cleanup on_exit = { this, &lock, &this_thread };
+ (void)on_exit;
+
+ // Complete the operation. May throw an exception. Deletes the object.
+ o->complete(this, ec, task_result);
+
+ return 1;
+}
+
+std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
+ scheduler::thread_info& this_thread,
const boost::system::error_code& ec)
{
if (stopped_)
@@ -404,7 +503,7 @@ std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock,
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
- task_->run(false, this_thread.private_op_queue);
+ task_->run(0, this_thread.private_op_queue);
}
o = op_queue_.front();
@@ -433,12 +532,12 @@ std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock,
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
- o->complete(*this, ec, task_result);
+ o->complete(this, ec, task_result);
return 1;
}
-void task_io_service::stop_all_threads(
+void scheduler::stop_all_threads(
mutex::scoped_lock& lock)
{
stopped_ = true;
@@ -451,7 +550,7 @@ void task_io_service::stop_all_threads(
}
}
-void task_io_service::wake_one_thread_and_unlock(
+void scheduler::wake_one_thread_and_unlock(
mutex::scoped_lock& lock)
{
if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
@@ -471,6 +570,4 @@ void task_io_service::wake_one_thread_and_unlock(
#include <boost/asio/detail/pop_options.hpp>
-#endif // !defined(BOOST_ASIO_HAS_IOCP)
-
-#endif // BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
+#endif // BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
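
The new do_wait_one() above mirrors do_run_one() but bounds the wait: the wakeup event and the reactor each get at most usec microseconds (usec is zeroed after the first wait so the function waits at most once), and task_->run() now takes a microsecond timeout where 0 means poll and -1 means block, replacing the old bool. From user code this path is reached through io_context's timed run overloads; a small sketch (assuming the chrono-based run_one_for() overload in this release):

    #include <boost/asio/io_context.hpp>
    #include <chrono>
    #include <iostream>

    int main()
    {
      boost::asio::io_context ctx;

      // With no outstanding work, wait_one() stops the scheduler and
      // returns 0 immediately; otherwise it runs at most one ready
      // handler, waiting up to the given duration for one to appear.
      std::size_t n = ctx.run_one_for(std::chrono::milliseconds(50));
      std::cout << "handlers run: " << n << "\n";
    }
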
diff --git a/boost/asio/detail/impl/select_reactor.hpp b/boost/asio/detail/impl/select_reactor.hpp
index d3f28f5e61..207a045f6c 100644
--- a/boost/asio/detail/impl/select_reactor.hpp
+++ b/boost/asio/detail/impl/select_reactor.hpp
@@ -51,12 +51,12 @@ void select_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
if (shutdown_)
{
- io_service_.post_immediate_completion(op, false);
+ scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
- io_service_.work_started();
+ scheduler_.work_started();
if (earliest)
interrupter_.interrupt();
}
@@ -70,10 +70,23 @@ std::size_t select_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
return n;
}
+template <typename Time_Traits>
+void select_reactor::move_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& target,
+ typename timer_queue<Time_Traits>::per_timer_data& source)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ queue.cancel_timer(target, ops);
+ queue.move_timer(target, source);
+ lock.unlock();
+ scheduler_.post_deferred_completions(ops);
+}
+
} // namespace detail
} // namespace asio
} // namespace boost
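
The new move_timer() exists to support the move assignment that waitable timers gained in this release: pending waits against the target are cancelled, then the source's per-timer data is transferred, all under the reactor lock. A usage sketch (assuming basic_waitable_timer's new move support):

    #include <boost/asio/io_context.hpp>
    #include <boost/asio/steady_timer.hpp>
    #include <chrono>
    #include <utility>

    int main()
    {
      boost::asio::io_context ctx;

      boost::asio::steady_timer t1(ctx, std::chrono::milliseconds(10));
      boost::asio::steady_timer t2(ctx);

      // Move assignment routes through reactor move_timer(): t2's pending
      // waits (none here) are cancelled and t1's timer data moves to t2.
      t2 = std::move(t1);

      t2.wait(); // still expires 10ms after t1's construction
    }
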
diff --git a/boost/asio/detail/impl/select_reactor.ipp b/boost/asio/detail/impl/select_reactor.ipp
index 869f73492b..44fab64269 100644
--- a/boost/asio/detail/impl/select_reactor.ipp
+++ b/boost/asio/detail/impl/select_reactor.ipp
@@ -23,7 +23,6 @@
&& !defined(BOOST_ASIO_HAS_KQUEUE) \
&& !defined(BOOST_ASIO_WINDOWS_RUNTIME))
-#include <boost/asio/detail/bind_handler.hpp>
#include <boost/asio/detail/fd_set_adapter.hpp>
#include <boost/asio/detail/select_reactor.hpp>
#include <boost/asio/detail/signal_blocker.hpp>
@@ -35,9 +34,28 @@ namespace boost {
namespace asio {
namespace detail {
-select_reactor::select_reactor(boost::asio::io_service& io_service)
- : boost::asio::detail::service_base<select_reactor>(io_service),
- io_service_(use_service<io_service_impl>(io_service)),
+#if defined(BOOST_ASIO_HAS_IOCP)
+class select_reactor::thread_function
+{
+public:
+ explicit thread_function(select_reactor* r)
+ : this_(r)
+ {
+ }
+
+ void operator()()
+ {
+ this_->run_thread();
+ }
+
+private:
+ select_reactor* this_;
+};
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
+select_reactor::select_reactor(boost::asio::execution_context& ctx)
+ : execution_context_service_base<select_reactor>(ctx),
+ scheduler_(use_service<scheduler_type>(ctx)),
mutex_(),
interrupter_(),
#if defined(BOOST_ASIO_HAS_IOCP)
@@ -48,17 +66,16 @@ select_reactor::select_reactor(boost::asio::io_service& io_service)
{
#if defined(BOOST_ASIO_HAS_IOCP)
boost::asio::detail::signal_blocker sb;
- thread_ = new boost::asio::detail::thread(
- bind_handler(&select_reactor::call_run_thread, this));
+ thread_ = new boost::asio::detail::thread(thread_function(this));
#endif // defined(BOOST_ASIO_HAS_IOCP)
}
select_reactor::~select_reactor()
{
- shutdown_service();
+ shutdown();
}
-void select_reactor::shutdown_service()
+void select_reactor::shutdown()
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
@@ -84,18 +101,19 @@ void select_reactor::shutdown_service()
timer_queues_.get_all_timers(ops);
- io_service_.abandon_operations(ops);
+ scheduler_.abandon_operations(ops);
}
-void select_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+void select_reactor::notify_fork(
+ boost::asio::execution_context::fork_event fork_ev)
{
- if (fork_ev == boost::asio::io_service::fork_child)
+ if (fork_ev == boost::asio::execution_context::fork_child)
interrupter_.recreate();
}
void select_reactor::init_task()
{
- io_service_.init_task();
+ scheduler_.init_task();
}
int select_reactor::register_descriptor(socket_type,
@@ -135,7 +153,7 @@ void select_reactor::start_op(int op_type, socket_type descriptor,
}
bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
- io_service_.work_started();
+ scheduler_.work_started();
if (first)
interrupter_.interrupt();
}
@@ -168,7 +186,7 @@ void select_reactor::cleanup_descriptor_data(
{
}
-void select_reactor::run(bool block, op_queue<operation>& ops)
+void select_reactor::run(long usec, op_queue<operation>& ops)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
@@ -205,12 +223,12 @@ void select_reactor::run(bool block, op_queue<operation>& ops)
// We can return immediately if there's no work to do and the reactor is
// not supposed to block.
- if (!block && !have_work_to_do)
+ if (!usec && !have_work_to_do)
return;
// Determine how long to block while waiting for events.
timeval tv_buf = { 0, 0 };
- timeval* tv = block ? get_timeout(tv_buf) : &tv_buf;
+ timeval* tv = usec ? get_timeout(usec, tv_buf) : &tv_buf;
lock.unlock();
@@ -259,15 +277,10 @@ void select_reactor::run_thread()
lock.unlock();
op_queue<operation> ops;
- run(true, ops);
+ run(-1, ops);
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
lock.lock();
}
}
-
-void select_reactor::call_run_thread(select_reactor* reactor)
-{
- reactor->run_thread();
-}
#endif // defined(BOOST_ASIO_HAS_IOCP)
void select_reactor::do_add_timer_queue(timer_queue_base& queue)
@@ -282,11 +295,13 @@ void select_reactor::do_remove_timer_queue(timer_queue_base& queue)
timer_queues_.erase(&queue);
}
-timeval* select_reactor::get_timeout(timeval& tv)
+timeval* select_reactor::get_timeout(long usec, timeval& tv)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
- long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
+ const long max_usec = 5 * 60 * 1000 * 1000;
+ usec = timer_queues_.wait_duration_usec(
+ (usec < 0 || max_usec < usec) ? max_usec : usec);
tv.tv_sec = usec / 1000000;
tv.tv_usec = usec % 1000000;
return &tv;
@@ -300,7 +315,7 @@ void select_reactor::cancel_ops_unlocked(socket_type descriptor,
for (int i = 0; i < max_ops; ++i)
need_interrupt = op_queue_[i].cancel_operations(
descriptor, ops, ec) || need_interrupt;
- io_service_.post_deferred_completions(ops);
+ scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
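
get_timeout() now combines the caller's budget with the longstanding 5 minute cap, so system clock changes are still noticed even when the caller asks for an unbounded wait. The clamping rule, restated as a standalone helper for clarity (the reactor then passes this clamped value to timer_queues_.wait_duration_usec(), which may shorten it further to the next timer expiry):

    // Restatement of the clamping rule in select_reactor::get_timeout().
    long clamp_usec(long usec)
    {
      const long max_usec = 5 * 60 * 1000 * 1000; // 5 minute safety cap
      // Negative means "no caller limit": only the cap applies. Otherwise
      // wait for the smaller of the caller's budget and the cap.
      return (usec < 0 || max_usec < usec) ? max_usec : usec;
    }
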
diff --git a/boost/asio/detail/impl/service_registry.hpp b/boost/asio/detail/impl/service_registry.hpp
index 7ffdd029f7..11ad4cf5cc 100644
--- a/boost/asio/detail/impl/service_registry.hpp
+++ b/boost/asio/detail/impl/service_registry.hpp
@@ -21,64 +21,70 @@ namespace boost {
namespace asio {
namespace detail {
-template <typename Service, typename Arg>
-service_registry::service_registry(
- boost::asio::io_service& o, Service*, Arg arg)
- : owner_(o),
- first_service_(new Service(o, arg))
-{
- boost::asio::io_service::service::key key;
- init_key(key, Service::id);
- first_service_->key_ = key;
- first_service_->next_ = 0;
-}
-
template <typename Service>
-Service& service_registry::first_service()
+Service& service_registry::use_service()
{
- return *static_cast<Service*>(first_service_);
+ execution_context::service::key key;
+ init_key<Service>(key, 0);
+ factory_type factory = &service_registry::create<Service, execution_context>;
+ return *static_cast<Service*>(do_use_service(key, factory, &owner_));
}
template <typename Service>
-Service& service_registry::use_service()
+Service& service_registry::use_service(io_context& owner)
{
- boost::asio::io_service::service::key key;
- init_key(key, Service::id);
- factory_type factory = &service_registry::create<Service>;
- return *static_cast<Service*>(do_use_service(key, factory));
+ execution_context::service::key key;
+ init_key<Service>(key, 0);
+ factory_type factory = &service_registry::create<Service, io_context>;
+ return *static_cast<Service*>(do_use_service(key, factory, &owner));
}
template <typename Service>
void service_registry::add_service(Service* new_service)
{
- boost::asio::io_service::service::key key;
- init_key(key, Service::id);
+ execution_context::service::key key;
+ init_key<Service>(key, 0);
return do_add_service(key, new_service);
}
template <typename Service>
bool service_registry::has_service() const
{
- boost::asio::io_service::service::key key;
- init_key(key, Service::id);
+ execution_context::service::key key;
+ init_key<Service>(key, 0);
return do_has_service(key);
}
+template <typename Service>
+inline void service_registry::init_key(
+ execution_context::service::key& key, ...)
+{
+ init_key_from_id(key, Service::id);
+}
+
#if !defined(BOOST_ASIO_NO_TYPEID)
template <typename Service>
-void service_registry::init_key(boost::asio::io_service::service::key& key,
- const boost::asio::detail::service_id<Service>& /*id*/)
+void service_registry::init_key(execution_context::service::key& key,
+ typename enable_if<
+ is_base_of<typename Service::key_type, Service>::value>::type*)
{
key.type_info_ = &typeid(typeid_wrapper<Service>);
key.id_ = 0;
}
-#endif // !defined(BOOST_ASIO_NO_TYPEID)
template <typename Service>
-boost::asio::io_service::service* service_registry::create(
- boost::asio::io_service& owner)
+void service_registry::init_key_from_id(execution_context::service::key& key,
+ const service_id<Service>& /*id*/)
+{
+ key.type_info_ = &typeid(typeid_wrapper<Service>);
+ key.id_ = 0;
+}
+#endif // !defined(BOOST_ASIO_NO_TYPEID)
+
+template <typename Service, typename Owner>
+execution_context::service* service_registry::create(void* owner)
{
- return new Service(owner);
+ return new Service(*static_cast<Owner*>(owner));
}
} // namespace detail
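
The split between init_key() and init_key_from_id() above is an overload-resolution trick: the enable_if overload fires when a service declares itself (or a base) as its key_type, keying the registry entry by typeid; every other service falls through the variadic overload to the id-based key. A compilable sketch of the mechanism (simplified and outside the real registry):

    #include <iostream>
    #include <type_traits>

    // Fallback: chosen when the preferred overload drops out of the set.
    template <typename Service>
    void init_key(...)
    {
      std::cout << "keyed by Service::id\n";
    }

    // Preferred: SFINAE keeps this overload only when Service derives from
    // its own declared key_type, in which case typeid(Service) is the key.
    template <typename Service>
    typename std::enable_if<
        std::is_base_of<typename Service::key_type, Service>::value>::type
    init_key(int)
    {
      std::cout << "keyed by typeid(Service)\n";
    }

    struct plain_service {};                                  // no key_type
    struct keyed_service { typedef keyed_service key_type; };

    int main()
    {
      init_key<plain_service>(0); // substitution fails -> id-based key
      init_key<keyed_service>(0); // exact int match -> typeid-based key
    }
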
diff --git a/boost/asio/detail/impl/service_registry.ipp b/boost/asio/detail/impl/service_registry.ipp
index 25ac8eccc6..75c3f45625 100644
--- a/boost/asio/detail/impl/service_registry.ipp
+++ b/boost/asio/detail/impl/service_registry.ipp
@@ -26,36 +26,45 @@ namespace boost {
namespace asio {
namespace detail {
+service_registry::service_registry(execution_context& owner)
+ : owner_(owner),
+ first_service_(0)
+{
+}
+
service_registry::~service_registry()
{
- // Shutdown all services. This must be done in a separate loop before the
- // services are destroyed since the destructors of user-defined handler
- // objects may try to access other service objects.
- boost::asio::io_service::service* service = first_service_;
+}
+
+void service_registry::shutdown_services()
+{
+ execution_context::service* service = first_service_;
while (service)
{
- service->shutdown_service();
+ service->shutdown();
service = service->next_;
}
+}
- // Destroy all services.
+void service_registry::destroy_services()
+{
while (first_service_)
{
- boost::asio::io_service::service* next_service = first_service_->next_;
+ execution_context::service* next_service = first_service_->next_;
destroy(first_service_);
first_service_ = next_service;
}
}
-void service_registry::notify_fork(boost::asio::io_service::fork_event fork_ev)
+void service_registry::notify_fork(execution_context::fork_event fork_ev)
{
// Make a copy of all of the services while holding the lock. We don't want
// to hold the lock while calling into each service, as it may try to call
// back into this class.
- std::vector<boost::asio::io_service::service*> services;
+ std::vector<execution_context::service*> services;
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
- boost::asio::io_service::service* service = first_service_;
+ execution_context::service* service = first_service_;
while (service)
{
services.push_back(service);
@@ -68,24 +77,24 @@ void service_registry::notify_fork(boost::asio::io_service::fork_event fork_ev)
// services in the vector. For the other events we want to go in the other
// direction.
std::size_t num_services = services.size();
- if (fork_ev == boost::asio::io_service::fork_prepare)
+ if (fork_ev == execution_context::fork_prepare)
for (std::size_t i = 0; i < num_services; ++i)
- services[i]->fork_service(fork_ev);
+ services[i]->notify_fork(fork_ev);
else
for (std::size_t i = num_services; i > 0; --i)
- services[i - 1]->fork_service(fork_ev);
+ services[i - 1]->notify_fork(fork_ev);
}
-void service_registry::init_key(boost::asio::io_service::service::key& key,
- const boost::asio::io_service::id& id)
+void service_registry::init_key_from_id(execution_context::service::key& key,
+ const execution_context::id& id)
{
key.type_info_ = 0;
key.id_ = &id;
}
bool service_registry::keys_match(
- const boost::asio::io_service::service::key& key1,
- const boost::asio::io_service::service::key& key2)
+ const execution_context::service::key& key1,
+ const execution_context::service::key& key2)
{
if (key1.id_ && key2.id_)
if (key1.id_ == key2.id_)
@@ -96,19 +105,19 @@ bool service_registry::keys_match(
return false;
}
-void service_registry::destroy(boost::asio::io_service::service* service)
+void service_registry::destroy(execution_context::service* service)
{
delete service;
}
-boost::asio::io_service::service* service_registry::do_use_service(
- const boost::asio::io_service::service::key& key,
- factory_type factory)
+execution_context::service* service_registry::do_use_service(
+ const execution_context::service::key& key,
+ factory_type factory, void* owner)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
// First see if there is an existing service object with the given key.
- boost::asio::io_service::service* service = first_service_;
+ execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
@@ -120,7 +129,7 @@ boost::asio::io_service::service* service_registry::do_use_service(
// at this time to allow for nested calls into this function from the new
// service's constructor.
lock.unlock();
- auto_service_ptr new_service = { factory(owner_) };
+ auto_service_ptr new_service = { factory(owner) };
new_service.ptr_->key_ = key;
lock.lock();
@@ -142,16 +151,16 @@ boost::asio::io_service::service* service_registry::do_use_service(
}
void service_registry::do_add_service(
- const boost::asio::io_service::service::key& key,
- boost::asio::io_service::service* new_service)
+ const execution_context::service::key& key,
+ execution_context::service* new_service)
{
- if (&owner_ != &new_service->get_io_service())
+ if (&owner_ != &new_service->context())
boost::asio::detail::throw_exception(invalid_service_owner());
boost::asio::detail::mutex::scoped_lock lock(mutex_);
// Check if there is an existing service object with the given key.
- boost::asio::io_service::service* service = first_service_;
+ execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
@@ -166,11 +175,11 @@ void service_registry::do_add_service(
}
bool service_registry::do_has_service(
- const boost::asio::io_service::service::key& key) const
+ const execution_context::service::key& key) const
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
- boost::asio::io_service::service* service = first_service_;
+ execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
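
do_use_service() deliberately drops the registry lock while the factory runs, so a service constructor may itself call use_service() for its own dependencies, and only links the new object in after re-locking. A minimal user-defined service exercising that path (the class and main() below are illustrative assumptions, not part of the diff):

    #include <boost/asio/execution_context.hpp>
    #include <boost/asio/io_context.hpp>

    class my_service
      : public boost::asio::execution_context::service
    {
    public:
      typedef my_service key_type; // keyed by typeid via init_key()

      explicit my_service(boost::asio::execution_context& ctx)
        : boost::asio::execution_context::service(ctx) {}

    private:
      void shutdown() {} // called from shutdown_services() above
    };

    int main()
    {
      boost::asio::io_context ctx;
      boost::asio::execution_context& ectx = ctx;

      my_service& s1 = boost::asio::use_service<my_service>(ectx); // creates
      my_service& s2 = boost::asio::use_service<my_service>(ectx); // finds s1
      return &s1 == &s2 ? 0 : 1; // same instance both times
    }
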
diff --git a/boost/asio/detail/impl/signal_set_service.ipp b/boost/asio/detail/impl/signal_set_service.ipp
index 95ea8bee21..376d311471 100644
--- a/boost/asio/detail/impl/signal_set_service.ipp
+++ b/boost/asio/detail/impl/signal_set_service.ipp
@@ -18,10 +18,12 @@
#include <boost/asio/detail/config.hpp>
#include <cstring>
+#include <stdexcept>
#include <boost/asio/detail/reactor.hpp>
#include <boost/asio/detail/signal_blocker.hpp>
#include <boost/asio/detail/signal_set_service.hpp>
#include <boost/asio/detail/static_mutex.hpp>
+#include <boost/asio/detail/throw_exception.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -92,7 +94,7 @@ public:
{
}
- static bool do_perform(reactor_op*)
+ static status do_perform(reactor_op*)
{
signal_state* state = get_signal_state();
@@ -102,10 +104,10 @@ public:
if (signal_number >= 0 && signal_number < max_signal_number)
signal_set_service::deliver_signal(signal_number);
- return false;
+ return not_done;
}
- static void do_complete(io_service_impl* /*owner*/, operation* base,
+ static void do_complete(void* /*owner*/, operation* base,
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
@@ -118,12 +120,13 @@ public:
// && !defined(__CYGWIN__)
signal_set_service::signal_set_service(
- boost::asio::io_service& io_service)
- : io_service_(boost::asio::use_service<io_service_impl>(io_service)),
+ boost::asio::io_context& io_context)
+ : service_base<signal_set_service>(io_context),
+ io_context_(boost::asio::use_service<io_context_impl>(io_context)),
#if !defined(BOOST_ASIO_WINDOWS) \
&& !defined(BOOST_ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
- reactor_(boost::asio::use_service<reactor>(io_service)),
+ reactor_(boost::asio::use_service<reactor>(io_context)),
#endif // !defined(BOOST_ASIO_WINDOWS)
// && !defined(BOOST_ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
@@ -151,7 +154,7 @@ signal_set_service::~signal_set_service()
remove_service(this);
}
-void signal_set_service::shutdown_service()
+void signal_set_service::shutdown()
{
remove_service(this);
@@ -167,11 +170,11 @@ void signal_set_service::shutdown_service()
}
}
- io_service_.abandon_operations(ops);
+ io_context_.abandon_operations(ops);
}
-void signal_set_service::fork_service(
- boost::asio::io_service::fork_event fork_ev)
+void signal_set_service::notify_fork(
+ boost::asio::io_context::fork_event fork_ev)
{
#if !defined(BOOST_ASIO_WINDOWS) \
&& !defined(BOOST_ASIO_WINDOWS_RUNTIME) \
@@ -181,7 +184,7 @@ void signal_set_service::fork_service(
switch (fork_ev)
{
- case boost::asio::io_service::fork_prepare:
+ case boost::asio::io_context::fork_prepare:
{
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = true;
@@ -190,7 +193,7 @@ void signal_set_service::fork_service(
reactor_.cleanup_descriptor_data(reactor_data_);
}
break;
- case boost::asio::io_service::fork_parent:
+ case boost::asio::io_context::fork_parent:
if (state->fork_prepared_)
{
int read_descriptor = state->read_descriptor_;
@@ -200,7 +203,7 @@ void signal_set_service::fork_service(
read_descriptor, reactor_data_, new pipe_read_op);
}
break;
- case boost::asio::io_service::fork_child:
+ case boost::asio::io_context::fork_child:
if (state->fork_prepared_)
{
boost::asio::detail::signal_blocker blocker;
@@ -439,7 +442,8 @@ boost::system::error_code signal_set_service::cancel(
signal_set_service::implementation_type& impl,
boost::system::error_code& ec)
{
- BOOST_ASIO_HANDLER_OPERATION(("signal_set", &impl, "cancel"));
+ BOOST_ASIO_HANDLER_OPERATION((io_context_.context(),
+ "signal_set", &impl, 0, "cancel"));
op_queue<operation> ops;
{
@@ -454,7 +458,7 @@ boost::system::error_code signal_set_service::cancel(
}
}
- io_service_.post_deferred_completions(ops);
+ io_context_.post_deferred_completions(ops);
ec = boost::system::error_code();
return ec;
@@ -490,7 +494,7 @@ void signal_set_service::deliver_signal(int signal_number)
reg = reg->next_in_table_;
}
- service->io_service_.post_deferred_completions(ops);
+ service->io_context_.post_deferred_completions(ops);
service = service->next_;
}
@@ -507,6 +511,22 @@ void signal_set_service::add_service(signal_set_service* service)
open_descriptors();
#endif // !defined(BOOST_ASIO_WINDOWS) && !defined(__CYGWIN__)
+ // If an io_context object is thread-unsafe then it must be the only
+ // io_context used to create signal_set objects.
+ if (state->service_list_ != 0)
+ {
+ if (!BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
+ service->io_context_.concurrency_hint())
+ || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
+ state->service_list_->io_context_.concurrency_hint()))
+ {
+ std::logic_error ex(
+ "Thread-unsafe io_context objects require "
+ "exclusive access to signal handling.");
+ boost::asio::detail::throw_exception(ex);
+ }
+ }
+
// Insert service into linked list of all services.
service->next_ = state->service_list_;
service->prev_ = 0;
@@ -620,7 +640,7 @@ void signal_set_service::close_descriptors()
void signal_set_service::start_wait_op(
signal_set_service::implementation_type& impl, signal_op* op)
{
- io_service_.work_started();
+ io_context_.work_started();
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
@@ -632,7 +652,7 @@ void signal_set_service::start_wait_op(
{
--reg->undelivered_;
op->signal_number_ = reg->signal_number_;
- io_service_.post_deferred_completion(op);
+ io_context_.post_deferred_completion(op);
return;
}
diff --git a/boost/asio/detail/impl/socket_ops.ipp b/boost/asio/detail/impl/socket_ops.ipp
index c9683f4a09..94be0f280c 100644
--- a/boost/asio/detail/impl/socket_ops.ipp
+++ b/boost/asio/detail/impl/socket_ops.ipp
@@ -169,7 +169,7 @@ socket_type sync_accept(socket_type s, state_type state,
return invalid_socket;
// Wait for socket to become ready.
- if (socket_ops::poll_read(s, 0, ec) < 0)
+ if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return invalid_socket;
}
}
@@ -242,8 +242,6 @@ bool non_blocking_accept(socket_type s,
if (ec == boost::asio::error::would_block
|| ec == boost::asio::error::try_again)
{
- if (state & user_set_non_blocking)
- return true;
// Fall through to retry operation.
}
else if (ec == boost::asio::error::connection_aborted)
@@ -506,7 +504,7 @@ void sync_connect(socket_type s, const socket_addr_type* addr,
}
// Wait for socket to become ready.
- if (socket_ops::poll_connect(s, ec) < 0)
+ if (socket_ops::poll_connect(s, -1, ec) < 0)
return;
// Get the error code from the connect operation.
@@ -773,6 +771,8 @@ signed_size_type recv(socket_type s, buf* bufs, size_t count,
ec = boost::asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = boost::asio::error::connection_refused;
+ else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
+ ec.assign(0, ec.category());
if (result != 0)
return socket_error_retval;
ec = boost::system::error_code();
@@ -828,7 +828,7 @@ size_t sync_recv(socket_type s, state_type state, buf* bufs,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_read(s, 0, ec) < 0)
+ if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return 0;
}
}
@@ -851,6 +851,10 @@ void complete_iocp_recv(state_type state,
{
ec = boost::asio::error::connection_refused;
}
+ else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
+ {
+ ec.assign(0, ec.category());
+ }
// Check for connection closed.
else if (!ec && bytes_transferred == 0
@@ -921,6 +925,8 @@ signed_size_type recvfrom(socket_type s, buf* bufs, size_t count,
ec = boost::asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = boost::asio::error::connection_refused;
+ else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
+ ec.assign(0, ec.category());
if (result != 0)
return socket_error_retval;
ec = boost::system::error_code();
@@ -967,7 +973,7 @@ size_t sync_recvfrom(socket_type s, state_type state, buf* bufs,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_read(s, 0, ec) < 0)
+ if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return 0;
}
}
@@ -990,6 +996,10 @@ void complete_iocp_recvfrom(
{
ec = boost::asio::error::connection_refused;
}
+ else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
+ {
+ ec.assign(0, ec.category());
+ }
}
#else // defined(BOOST_ASIO_HAS_IOCP)
@@ -1080,7 +1090,7 @@ size_t sync_recvmsg(socket_type s, state_type state,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_read(s, 0, ec) < 0)
+ if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return 0;
}
}
@@ -1103,6 +1113,10 @@ void complete_iocp_recvmsg(
{
ec = boost::asio::error::connection_refused;
}
+ else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
+ {
+ ec.assign(0, ec.category());
+ }
}
#else // defined(BOOST_ASIO_HAS_IOCP)
@@ -1207,7 +1221,7 @@ size_t sync_send(socket_type s, state_type state, const buf* bufs,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_write(s, 0, ec) < 0)
+ if (socket_ops::poll_write(s, 0, -1, ec) < 0)
return 0;
}
}
@@ -1331,7 +1345,7 @@ size_t sync_sendto(socket_type s, state_type state, const buf* bufs,
return 0;
// Wait for socket to become ready.
- if (socket_ops::poll_write(s, 0, ec) < 0)
+ if (socket_ops::poll_write(s, 0, -1, ec) < 0)
return 0;
}
}
@@ -1785,7 +1799,8 @@ int select(int nfds, fd_set* readfds, fd_set* writefds,
#endif
}
-int poll_read(socket_type s, state_type state, boost::system::error_code& ec)
+int poll_read(socket_type s, state_type state,
+ int msec, boost::system::error_code& ec)
{
if (s == invalid_socket)
{
@@ -1799,10 +1814,22 @@ int poll_read(socket_type s, state_type state, boost::system::error_code& ec)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
- timeval zero_timeout;
- zero_timeout.tv_sec = 0;
- zero_timeout.tv_usec = 0;
- timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0;
+ timeval timeout_obj;
+ timeval* timeout;
+ if (state & user_set_non_blocking)
+ {
+ timeout_obj.tv_sec = 0;
+ timeout_obj.tv_usec = 0;
+ timeout = &timeout_obj;
+ }
+ else if (msec >= 0)
+ {
+ timeout_obj.tv_sec = msec / 1000;
+ timeout_obj.tv_usec = (msec % 1000) * 1000;
+ timeout = &timeout_obj;
+ }
+ else
+ timeout = 0;
clear_last_error();
int result = error_wrapper(::select(s + 1, &fds, 0, 0, timeout), ec);
#else // defined(BOOST_ASIO_WINDOWS)
@@ -1812,7 +1839,7 @@ int poll_read(socket_type s, state_type state, boost::system::error_code& ec)
fds.fd = s;
fds.events = POLLIN;
fds.revents = 0;
- int timeout = (state & user_set_non_blocking) ? 0 : -1;
+ int timeout = (state & user_set_non_blocking) ? 0 : msec;
clear_last_error();
int result = error_wrapper(::poll(&fds, 1, timeout), ec);
#endif // defined(BOOST_ASIO_WINDOWS)
@@ -1826,7 +1853,8 @@ int poll_read(socket_type s, state_type state, boost::system::error_code& ec)
return result;
}
-int poll_write(socket_type s, state_type state, boost::system::error_code& ec)
+int poll_write(socket_type s, state_type state,
+ int msec, boost::system::error_code& ec)
{
if (s == invalid_socket)
{
@@ -1840,10 +1868,22 @@ int poll_write(socket_type s, state_type state, boost::system::error_code& ec)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
- timeval zero_timeout;
- zero_timeout.tv_sec = 0;
- zero_timeout.tv_usec = 0;
- timeval* timeout = (state & user_set_non_blocking) ? &zero_timeout : 0;
+ timeval timeout_obj;
+ timeval* timeout;
+ if (state & user_set_non_blocking)
+ {
+ timeout_obj.tv_sec = 0;
+ timeout_obj.tv_usec = 0;
+ timeout = &timeout_obj;
+ }
+ else if (msec >= 0)
+ {
+ timeout_obj.tv_sec = msec / 1000;
+ timeout_obj.tv_usec = (msec % 1000) * 1000;
+ timeout = &timeout_obj;
+ }
+ else
+ timeout = 0;
clear_last_error();
int result = error_wrapper(::select(s + 1, 0, &fds, 0, timeout), ec);
#else // defined(BOOST_ASIO_WINDOWS)
@@ -1853,7 +1893,7 @@ int poll_write(socket_type s, state_type state, boost::system::error_code& ec)
fds.fd = s;
fds.events = POLLOUT;
fds.revents = 0;
- int timeout = (state & user_set_non_blocking) ? 0 : -1;
+ int timeout = (state & user_set_non_blocking) ? 0 : msec;
clear_last_error();
int result = error_wrapper(::poll(&fds, 1, timeout), ec);
#endif // defined(BOOST_ASIO_WINDOWS)
@@ -1867,7 +1907,61 @@ int poll_write(socket_type s, state_type state, boost::system::error_code& ec)
return result;
}
-int poll_connect(socket_type s, boost::system::error_code& ec)
+int poll_error(socket_type s, state_type state,
+ int msec, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return socket_error_retval;
+ }
+
+#if defined(BOOST_ASIO_WINDOWS) \
+ || defined(__CYGWIN__) \
+ || defined(__SYMBIAN32__)
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(s, &fds);
+ timeval timeout_obj;
+ timeval* timeout;
+ if (state & user_set_non_blocking)
+ {
+ timeout_obj.tv_sec = 0;
+ timeout_obj.tv_usec = 0;
+ timeout = &timeout_obj;
+ }
+ else if (msec >= 0)
+ {
+ timeout_obj.tv_sec = msec / 1000;
+ timeout_obj.tv_usec = (msec % 1000) * 1000;
+ timeout = &timeout_obj;
+ }
+ else
+ timeout = 0;
+ clear_last_error();
+ int result = error_wrapper(::select(s + 1, 0, 0, &fds, timeout), ec);
+#else // defined(BOOST_ASIO_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+ pollfd fds;
+ fds.fd = s;
+ fds.events = POLLPRI | POLLERR | POLLHUP;
+ fds.revents = 0;
+ int timeout = (state & user_set_non_blocking) ? 0 : msec;
+ clear_last_error();
+ int result = error_wrapper(::poll(&fds, 1, timeout), ec);
+#endif // defined(BOOST_ASIO_WINDOWS)
+ // || defined(__CYGWIN__)
+ // || defined(__SYMBIAN32__)
+ if (result == 0)
+ ec = (state & user_set_non_blocking)
+ ? boost::asio::error::would_block : boost::system::error_code();
+ else if (result > 0)
+ ec = boost::system::error_code();
+ return result;
+}
+
+int poll_connect(socket_type s, int msec, boost::system::error_code& ec)
{
if (s == invalid_socket)
{
@@ -1884,9 +1978,19 @@ int poll_connect(socket_type s, boost::system::error_code& ec)
fd_set except_fds;
FD_ZERO(&except_fds);
FD_SET(s, &except_fds);
+ timeval timeout_obj;
+ timeval* timeout;
+ if (msec >= 0)
+ {
+ timeout_obj.tv_sec = msec / 1000;
+ timeout_obj.tv_usec = (msec % 1000) * 1000;
+ timeout = &timeout_obj;
+ }
+ else
+ timeout = 0;
clear_last_error();
int result = error_wrapper(::select(
- s + 1, 0, &write_fds, &except_fds, 0), ec);
+ s + 1, 0, &write_fds, &except_fds, timeout), ec);
if (result >= 0)
ec = boost::system::error_code();
return result;
@@ -1898,7 +2002,7 @@ int poll_connect(socket_type s, boost::system::error_code& ec)
fds.events = POLLOUT;
fds.revents = 0;
clear_last_error();
- int result = error_wrapper(::poll(&fds, 1, -1), ec);
+ int result = error_wrapper(::poll(&fds, 1, msec), ec);
if (result >= 0)
ec = boost::system::error_code();
return result;
@@ -3346,7 +3450,6 @@ boost::system::error_code getnameinfo(const socket_addr_type* addr,
using namespace std; // For memcpy.
sockaddr_storage_type tmp_addr;
memcpy(&tmp_addr, addr, addrlen);
- tmp_addr.ss_len = addrlen;
addr = reinterpret_cast<socket_addr_type*>(&tmp_addr);
clear_last_error();
return getnameinfo_emulation(addr, addrlen,
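
All of the poll_* helpers now take an explicit timeout in milliseconds with the usual poll() convention: -1 blocks indefinitely (the old behaviour, as seen at the updated sync_* call sites), 0 polls, and a positive value bounds the wait; a socket marked user_set_non_blocking always polls. The new poll_error() backs the wait_error case of the public wait() operation added in this release. A short sketch (UDP chosen because a fresh UDP socket is immediately writable):

    #include <boost/asio/io_context.hpp>
    #include <boost/asio/ip/udp.hpp>

    int main()
    {
      boost::asio::io_context ctx;
      boost::asio::ip::udp::socket sock(ctx, boost::asio::ip::udp::v4());

      // Synchronous wait() reaches socket_ops::poll_write() with msec == -1
      // (block until ready); it returns promptly here since UDP is writable.
      sock.wait(boost::asio::ip::udp::socket::wait_write);
    }
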
diff --git a/boost/asio/detail/impl/strand_executor_service.hpp b/boost/asio/detail/impl/strand_executor_service.hpp
new file mode 100644
index 0000000000..6821c2ec64
--- /dev/null
+++ b/boost/asio/detail/impl/strand_executor_service.hpp
@@ -0,0 +1,181 @@
+//
+// detail/impl/strand_executor_service.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
+#define BOOST_ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/call_stack.hpp>
+#include <boost/asio/detail/fenced_block.hpp>
+#include <boost/asio/detail/handler_invoke_helpers.hpp>
+#include <boost/asio/detail/recycling_allocator.hpp>
+#include <boost/asio/executor_work_guard.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+template <typename Executor>
+class strand_executor_service::invoker
+{
+public:
+ invoker(const implementation_type& impl, Executor& ex)
+ : impl_(impl),
+ work_(ex)
+ {
+ }
+
+ invoker(const invoker& other)
+ : impl_(other.impl_),
+ work_(other.work_)
+ {
+ }
+
+#if defined(BOOST_ASIO_HAS_MOVE)
+ invoker(invoker&& other)
+ : impl_(BOOST_ASIO_MOVE_CAST(implementation_type)(other.impl_)),
+ work_(BOOST_ASIO_MOVE_CAST(executor_work_guard<Executor>)(other.work_))
+ {
+ }
+#endif // defined(BOOST_ASIO_HAS_MOVE)
+
+ struct on_invoker_exit
+ {
+ invoker* this_;
+
+ ~on_invoker_exit()
+ {
+ this_->impl_->mutex_->lock();
+ this_->impl_->ready_queue_.push(this_->impl_->waiting_queue_);
+ bool more_handlers = this_->impl_->locked_ =
+ !this_->impl_->ready_queue_.empty();
+ this_->impl_->mutex_->unlock();
+
+ if (more_handlers)
+ {
+ Executor ex(this_->work_.get_executor());
+ recycling_allocator<void> allocator;
+ ex.post(BOOST_ASIO_MOVE_CAST(invoker)(*this_), allocator);
+ }
+ }
+ };
+
+ void operator()()
+ {
+ // Indicate that this strand is executing on the current thread.
+ call_stack<strand_impl>::context ctx(impl_.get());
+
+ // Ensure the next handler, if any, is scheduled on block exit.
+ on_invoker_exit on_exit = { this };
+ (void)on_exit;
+
+ // Run all ready handlers. No lock is required since the ready queue is
+ // accessed only within the strand.
+ boost::system::error_code ec;
+ while (scheduler_operation* o = impl_->ready_queue_.front())
+ {
+ impl_->ready_queue_.pop();
+ o->complete(impl_.get(), ec, 0);
+ }
+ }
+
+private:
+ implementation_type impl_;
+ executor_work_guard<Executor> work_;
+};
+
+template <typename Executor, typename Function, typename Allocator>
+void strand_executor_service::dispatch(const implementation_type& impl,
+ Executor& ex, BOOST_ASIO_MOVE_ARG(Function) function, const Allocator& a)
+{
+ typedef typename decay<Function>::type function_type;
+
+ // If we are already in the strand then the function can run immediately.
+ if (call_stack<strand_impl>::contains(impl.get()))
+ {
+ // Make a local, non-const copy of the function.
+ function_type tmp(BOOST_ASIO_MOVE_CAST(Function)(function));
+
+ fenced_block b(fenced_block::full);
+ boost_asio_handler_invoke_helpers::invoke(tmp, tmp);
+ return;
+ }
+
+ // Allocate and construct an operation to wrap the function.
+ typedef executor_op<function_type, Allocator> op;
+ typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
+ p.p = new (p.v) op(BOOST_ASIO_MOVE_CAST(Function)(function), a);
+
+ BOOST_ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
+ "strand_executor", impl.get(), 0, "dispatch"));
+
+ // Add the function to the strand and schedule the strand if required.
+ bool first = enqueue(impl, p.p);
+ p.v = p.p = 0;
+ if (first)
+ ex.dispatch(invoker<Executor>(impl, ex), a);
+}
+
+// Request invocation of the given function and return immediately.
+template <typename Executor, typename Function, typename Allocator>
+void strand_executor_service::post(const implementation_type& impl,
+ Executor& ex, BOOST_ASIO_MOVE_ARG(Function) function, const Allocator& a)
+{
+ typedef typename decay<Function>::type function_type;
+
+ // Allocate and construct an operation to wrap the function.
+ typedef executor_op<function_type, Allocator> op;
+ typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
+ p.p = new (p.v) op(BOOST_ASIO_MOVE_CAST(Function)(function), a);
+
+ BOOST_ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
+ "strand_executor", impl.get(), 0, "post"));
+
+ // Add the function to the strand and schedule the strand if required.
+ bool first = enqueue(impl, p.p);
+ p.v = p.p = 0;
+ if (first)
+ ex.post(invoker<Executor>(impl, ex), a);
+}
+
+// Request invocation of the given function and return immediately.
+template <typename Executor, typename Function, typename Allocator>
+void strand_executor_service::defer(const implementation_type& impl,
+ Executor& ex, BOOST_ASIO_MOVE_ARG(Function) function, const Allocator& a)
+{
+ typedef typename decay<Function>::type function_type;
+
+ // Allocate and construct an operation to wrap the function.
+ typedef executor_op<function_type, Allocator> op;
+ typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
+ p.p = new (p.v) op(BOOST_ASIO_MOVE_CAST(Function)(function), a);
+
+ BOOST_ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
+ "strand_executor", impl.get(), 0, "defer"));
+
+ // Add the function to the strand and schedule the strand if required.
+ bool first = enqueue(impl, p.p);
+ p.v = p.p = 0;
+ if (first)
+ ex.defer(invoker<Executor>(impl, ex), a);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
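
The invoker above is the unit the strand hands to the underlying executor: it drains the ready queue without locking (safe because only one invoker per strand runs at a time), and its on_invoker_exit destructor promotes the waiting queue and reposts the invoker whenever more work arrived, even if a handler throws. From user code this all sits behind the new executor-based strand; a usage sketch:

    #include <boost/asio/io_context.hpp>
    #include <boost/asio/post.hpp>
    #include <boost/asio/strand.hpp>
    #include <iostream>

    int main()
    {
      boost::asio::io_context ctx;

      boost::asio::strand<boost::asio::io_context::executor_type>
        s(ctx.get_executor());

      // Both functions enqueue through strand_executor_service::post();
      // only the first enqueue schedules an invoker, so the two never
      // run concurrently and complete in FIFO order.
      boost::asio::post(s, []{ std::cout << "first\n"; });
      boost::asio::post(s, []{ std::cout << "second\n"; });

      ctx.run();
    }
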
diff --git a/boost/asio/detail/impl/strand_executor_service.ipp b/boost/asio/detail/impl/strand_executor_service.ipp
new file mode 100644
index 0000000000..353304ae27
--- /dev/null
+++ b/boost/asio/detail/impl/strand_executor_service.ipp
@@ -0,0 +1,126 @@
+//
+// detail/impl/strand_executor_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+#include <boost/asio/detail/strand_executor_service.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+strand_executor_service::strand_executor_service(execution_context& ctx)
+ : execution_context_service_base<strand_executor_service>(ctx),
+ mutex_(),
+ salt_(0),
+ impl_list_(0)
+{
+}
+
+void strand_executor_service::shutdown()
+{
+ op_queue<scheduler_operation> ops;
+
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ strand_impl* impl = impl_list_;
+ while (impl)
+ {
+ ops.push(impl->waiting_queue_);
+ ops.push(impl->ready_queue_);
+ impl = impl->next_;
+ }
+}
+
+strand_executor_service::implementation_type
+strand_executor_service::create_implementation()
+{
+ implementation_type new_impl(new strand_impl);
+ new_impl->locked_ = false;
+
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ // Select a mutex from the pool of shared mutexes.
+ std::size_t salt = salt_++;
+ std::size_t mutex_index = reinterpret_cast<std::size_t>(new_impl.get());
+ mutex_index += (reinterpret_cast<std::size_t>(new_impl.get()) >> 3);
+ mutex_index ^= salt + 0x9e3779b9 + (mutex_index << 6) + (mutex_index >> 2);
+ mutex_index = mutex_index % num_mutexes;
+ if (!mutexes_[mutex_index].get())
+ mutexes_[mutex_index].reset(new mutex);
+ new_impl->mutex_ = mutexes_[mutex_index].get();
+
+ // Insert implementation into linked list of all implementations.
+ new_impl->next_ = impl_list_;
+ new_impl->prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = new_impl.get();
+ impl_list_ = new_impl.get();
+ new_impl->service_ = this;
+
+ return new_impl;
+}
+
+strand_executor_service::strand_impl::~strand_impl()
+{
+ boost::asio::detail::mutex::scoped_lock lock(service_->mutex_);
+
+ // Remove implementation from linked list of all implementations.
+ if (service_->impl_list_ == this)
+ service_->impl_list_ = next_;
+ if (prev_)
+ prev_->next_ = next_;
+ if (next_)
+ next_->prev_ = prev_;

+}
+
+bool strand_executor_service::enqueue(const implementation_type& impl,
+ scheduler_operation* op)
+{
+ impl->mutex_->lock();
+ if (impl->locked_)
+ {
+ // Some other function already holds the strand lock. Enqueue for later.
+ impl->waiting_queue_.push(op);
+ impl->mutex_->unlock();
+ return false;
+ }
+ else
+ {
+ // The function is acquiring the strand lock and so is responsible for
+ // scheduling the strand.
+ impl->locked_ = true;
+ impl->mutex_->unlock();
+ impl->ready_queue_.push(op);
+ return true;
+ }
+}
+
+bool strand_executor_service::running_in_this_thread(
+ const implementation_type& impl)
+{
+ return !!call_stack<strand_impl>::contains(impl.get());
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
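
create_implementation() avoids one mutex per strand by hashing the implementation's address, salted and mixed hash_combine-style, into a fixed pool of lazily created mutexes; distinct strands may therefore share a mutex, which is harmless because the lock only guards the queue pointers. The index computation, restated standalone (the pool size parameter stands in for the service's num_mutexes constant):

    #include <cstddef>

    // Restates the mutex-pool selection in create_implementation().
    std::size_t pick_mutex_index(const void* impl, std::size_t salt,
        std::size_t num_mutexes)
    {
      std::size_t i = reinterpret_cast<std::size_t>(impl);
      i += reinterpret_cast<std::size_t>(impl) >> 3; // fold alignment bits
      i ^= salt + 0x9e3779b9 + (i << 6) + (i >> 2);  // golden-ratio mix
      return i % num_mutexes;
    }
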
diff --git a/boost/asio/detail/impl/strand_service.hpp b/boost/asio/detail/impl/strand_service.hpp
index 4167f686d1..df21099971 100644
--- a/boost/asio/detail/impl/strand_service.hpp
+++ b/boost/asio/detail/impl/strand_service.hpp
@@ -15,12 +15,12 @@
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
-#include <boost/asio/detail/addressof.hpp>
#include <boost/asio/detail/call_stack.hpp>
#include <boost/asio/detail/completion_handler.hpp>
#include <boost/asio/detail/fenced_block.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
+#include <boost/asio/detail/memory.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -36,7 +36,7 @@ inline strand_service::strand_impl::strand_impl()
struct strand_service::on_dispatch_exit
{
- io_service_impl* io_service_;
+ io_context_impl* io_context_;
strand_impl* impl_;
~on_dispatch_exit()
@@ -47,7 +47,7 @@ struct strand_service::on_dispatch_exit
impl_->mutex_.unlock();
if (more_handlers)
- io_service_->post_immediate_completion(impl_, false);
+ io_context_->post_immediate_completion(impl_, false);
}
};
@@ -66,11 +66,11 @@ void strand_service::dispatch(strand_service::implementation_type& impl,
// Allocate and construct an operation to wrap the handler.
typedef completion_handler<Handler> op;
typename op::ptr p = { boost::asio::detail::addressof(handler),
- boost_asio_handler_alloc_helpers::allocate(
- sizeof(op), handler), 0 };
+ op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler);
- BOOST_ASIO_HANDLER_CREATION((p.p, "strand", impl, "dispatch"));
+ BOOST_ASIO_HANDLER_CREATION((this->context(),
+ *p.p, "strand", impl, 0, "dispatch"));
bool dispatch_immediately = do_dispatch(impl, p.p);
operation* o = p.p;
@@ -82,15 +82,15 @@ void strand_service::dispatch(strand_service::implementation_type& impl,
call_stack<strand_impl>::context ctx(impl);
// Ensure the next handler, if any, is scheduled on block exit.
- on_dispatch_exit on_exit = { &io_service_, impl };
+ on_dispatch_exit on_exit = { &io_context_, impl };
(void)on_exit;
completion_handler<Handler>::do_complete(
- &io_service_, o, boost::system::error_code(), 0);
+ &io_context_, o, boost::system::error_code(), 0);
}
}
-// Request the io_service to invoke the given handler and return immediately.
+// Request the io_context to invoke the given handler and return immediately.
template <typename Handler>
void strand_service::post(strand_service::implementation_type& impl,
Handler& handler)
@@ -101,11 +101,11 @@ void strand_service::post(strand_service::implementation_type& impl,
// Allocate and construct an operation to wrap the handler.
typedef completion_handler<Handler> op;
typename op::ptr p = { boost::asio::detail::addressof(handler),
- boost_asio_handler_alloc_helpers::allocate(
- sizeof(op), handler), 0 };
+ op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler);
- BOOST_ASIO_HANDLER_CREATION((p.p, "strand", impl, "post"));
+ BOOST_ASIO_HANDLER_CREATION((this->context(),
+ *p.p, "strand", impl, 0, "post"));
do_post(impl, p.p, is_continuation);
p.v = p.p = 0;
diff --git a/boost/asio/detail/impl/strand_service.ipp b/boost/asio/detail/impl/strand_service.ipp
index 54ecde2cc7..e92355d837 100644
--- a/boost/asio/detail/impl/strand_service.ipp
+++ b/boost/asio/detail/impl/strand_service.ipp
@@ -27,7 +27,7 @@ namespace detail {
struct strand_service::on_do_complete_exit
{
- io_service_impl* owner_;
+ io_context_impl* owner_;
strand_impl* impl_;
~on_do_complete_exit()
@@ -42,15 +42,15 @@ struct strand_service::on_do_complete_exit
}
};
-strand_service::strand_service(boost::asio::io_service& io_service)
- : boost::asio::detail::service_base<strand_service>(io_service),
- io_service_(boost::asio::use_service<io_service_impl>(io_service)),
+strand_service::strand_service(boost::asio::io_context& io_context)
+ : boost::asio::detail::service_base<strand_service>(io_context),
+ io_context_(boost::asio::use_service<io_context_impl>(io_context)),
mutex_(),
salt_(0)
{
}
-void strand_service::shutdown_service()
+void strand_service::shutdown()
{
op_queue<operation> ops;
@@ -93,9 +93,9 @@ bool strand_service::running_in_this_thread(
bool strand_service::do_dispatch(implementation_type& impl, operation* op)
{
- // If we are running inside the io_service, and no other handler already
+ // If we are running inside the io_context, and no other handler already
// holds the strand lock, then the handler can run immediately.
- bool can_dispatch = io_service_.can_dispatch();
+ bool can_dispatch = io_context_.can_dispatch();
impl->mutex_.lock();
if (can_dispatch && !impl->locked_)
{
@@ -118,7 +118,7 @@ bool strand_service::do_dispatch(implementation_type& impl, operation* op)
impl->locked_ = true;
impl->mutex_.unlock();
impl->ready_queue_.push(op);
- io_service_.post_immediate_completion(impl, false);
+ io_context_.post_immediate_completion(impl, false);
}
return false;
@@ -141,11 +141,11 @@ void strand_service::do_post(implementation_type& impl,
impl->locked_ = true;
impl->mutex_.unlock();
impl->ready_queue_.push(op);
- io_service_.post_immediate_completion(impl, is_continuation);
+ io_context_.post_immediate_completion(impl, is_continuation);
}
}
-void strand_service::do_complete(io_service_impl* owner, operation* base,
+void strand_service::do_complete(void* owner, operation* base,
const boost::system::error_code& ec, std::size_t /*bytes_transferred*/)
{
if (owner)
@@ -156,15 +156,16 @@ void strand_service::do_complete(io_service_impl* owner, operation* base,
call_stack<strand_impl>::context ctx(impl);
// Ensure the next handler, if any, is scheduled on block exit.
- on_do_complete_exit on_exit = { owner, impl };
- (void)on_exit;
+ on_do_complete_exit on_exit;
+ on_exit.owner_ = static_cast<io_context_impl*>(owner);
+ on_exit.impl_ = impl;
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
while (operation* o = impl->ready_queue_.front())
{
impl->ready_queue_.pop();
- o->complete(*owner, ec, 0);
+ o->complete(owner, ec, 0);
}
}
}
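on_do_complete_exit above is a destructor-based scope guard: however the block exits, including via a throwing handler, the strand still gets its chance to reschedule. The same pattern in isolation, with an illustrative scope_exit template that is not part of Asio:

#include <utility>

template <typename F>
struct scope_exit
{
  F f_;
  ~scope_exit() { f_(); } // runs on normal exit and during unwinding
};

template <typename F>
scope_exit<F> make_scope_exit(F f) { return scope_exit<F>{ std::move(f) }; }

// Usage:
//   auto guard = make_scope_exit([&]{ reschedule_strand(); });
//   run_ready_handlers(); // even if this throws, the strand is rescheduled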
diff --git a/boost/asio/detail/impl/task_io_service.hpp b/boost/asio/detail/impl/task_io_service.hpp
deleted file mode 100644
index 09b4ba6985..0000000000
--- a/boost/asio/detail/impl/task_io_service.hpp
+++ /dev/null
@@ -1,80 +0,0 @@
-//
-// detail/impl/task_io_service.hpp
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-//
-// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
-//
-// Distributed under the Boost Software License, Version 1.0. (See accompanying
-// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
-//
-
-#ifndef BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP
-#define BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1200)
-# pragma once
-#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
-
-#include <boost/asio/detail/addressof.hpp>
-#include <boost/asio/detail/completion_handler.hpp>
-#include <boost/asio/detail/fenced_block.hpp>
-#include <boost/asio/detail/handler_alloc_helpers.hpp>
-#include <boost/asio/detail/handler_cont_helpers.hpp>
-#include <boost/asio/detail/handler_invoke_helpers.hpp>
-
-#include <boost/asio/detail/push_options.hpp>
-
-namespace boost {
-namespace asio {
-namespace detail {
-
-template <typename Handler>
-void task_io_service::dispatch(Handler& handler)
-{
- if (thread_call_stack::contains(this))
- {
- fenced_block b(fenced_block::full);
- boost_asio_handler_invoke_helpers::invoke(handler, handler);
- }
- else
- {
- // Allocate and construct an operation to wrap the handler.
- typedef completion_handler<Handler> op;
- typename op::ptr p = { boost::asio::detail::addressof(handler),
- boost_asio_handler_alloc_helpers::allocate(
- sizeof(op), handler), 0 };
- p.p = new (p.v) op(handler);
-
- BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));
-
- do_dispatch(p.p);
- p.v = p.p = 0;
- }
-}
-
-template <typename Handler>
-void task_io_service::post(Handler& handler)
-{
- bool is_continuation =
- boost_asio_handler_cont_helpers::is_continuation(handler);
-
- // Allocate and construct an operation to wrap the handler.
- typedef completion_handler<Handler> op;
- typename op::ptr p = { boost::asio::detail::addressof(handler),
- boost_asio_handler_alloc_helpers::allocate(
- sizeof(op), handler), 0 };
- p.p = new (p.v) op(handler);
-
- BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "post"));
-
- post_immediate_completion(p.p, is_continuation);
- p.v = p.p = 0;
-}
-
-} // namespace detail
-} // namespace asio
-} // namespace boost
-
-#include <boost/asio/detail/pop_options.hpp>
-
-#endif // BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP
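The deleted bodies above show the handler-wrapping idiom that survives in scheduler.ipp: raw storage comes from the handler's associated allocator, the operation is placement-new'd into it, and the tracking pointers are zeroed once the queue owns it (p.v = p.p = 0). A simplified sketch, with plain operator new standing in for the handler-aware allocator:

#include <new>

struct op { /* wraps the user's completion handler */ };

void post_sketch(bool enqueue_succeeds)
{
  void* v = ::operator new(sizeof(op)); // 1. raw storage from the allocator
  op* p = new (v) op();                 // 2. construct the operation in place
  if (enqueue_succeeds)
  {
    // 3. ownership moves to the scheduler's queue; the real code records
    //    this by zeroing its tracking pointers so cleanup is skipped.
    return;
  }
  p->~op();              // failure path: destroy and free locally
  ::operator delete(v);
}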
diff --git a/boost/asio/detail/impl/timer_queue_ptime.ipp b/boost/asio/detail/impl/timer_queue_ptime.ipp
index 0216d20ba5..885ee7d06e 100644
--- a/boost/asio/detail/impl/timer_queue_ptime.ipp
+++ b/boost/asio/detail/impl/timer_queue_ptime.ipp
@@ -16,12 +16,13 @@
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_HAS_BOOST_DATE_TIME)
+
#include <boost/asio/detail/timer_queue_ptime.hpp>
#include <boost/asio/detail/push_options.hpp>
-#if defined(BOOST_ASIO_HAS_BOOST_DATE_TIME)
-
namespace boost {
namespace asio {
namespace detail {
@@ -75,12 +76,18 @@ std::size_t timer_queue<time_traits<boost::posix_time::ptime> >::cancel_timer(
return impl_.cancel_timer(timer, ops, max_cancelled);
}
+void timer_queue<time_traits<boost::posix_time::ptime> >::move_timer(
+ per_timer_data& target, per_timer_data& source)
+{
+ impl_.move_timer(target, source);
+}
+
} // namespace detail
} // namespace asio
} // namespace boost
-#endif // defined(BOOST_ASIO_HAS_BOOST_DATE_TIME)
-
#include <boost/asio/detail/pop_options.hpp>
+#endif // defined(BOOST_ASIO_HAS_BOOST_DATE_TIME)
+
#endif // BOOST_ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
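The new move_timer() specialisation is the detail-layer support for movable timers in 1.66. At the public API level the effect is (a minimal sketch, assuming Boost 1.66's move-enabled I/O objects):

#include <boost/asio/deadline_timer.hpp>
#include <boost/asio/io_context.hpp>
#include <utility>

int main()
{
  boost::asio::io_context ctx;
  boost::asio::deadline_timer t1(ctx, boost::posix_time::seconds(5));
  boost::asio::deadline_timer t2(std::move(t1)); // queued state moves to t2
}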
diff --git a/boost/asio/detail/impl/win_event.ipp b/boost/asio/detail/impl/win_event.ipp
index 2519d3165d..4e084de2ba 100644
--- a/boost/asio/detail/impl/win_event.ipp
+++ b/boost/asio/detail/impl/win_event.ipp
@@ -33,7 +33,8 @@ win_event::win_event()
: state_(0)
{
#if defined(BOOST_ASIO_WINDOWS_APP)
- events_[0] = ::CreateEventExW(0, 0, CREATE_EVENT_MANUAL_RESET, 0);
+ events_[0] = ::CreateEventExW(0, 0,
+ CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
#else // defined(BOOST_ASIO_WINDOWS_APP)
events_[0] = ::CreateEventW(0, true, false, 0);
#endif // defined(BOOST_ASIO_WINDOWS_APP)
@@ -46,7 +47,7 @@ win_event::win_event()
}
#if defined(BOOST_ASIO_WINDOWS_APP)
- events_[1] = ::CreateEventExW(0, 0, 0, 0);
+ events_[1] = ::CreateEventExW(0, 0, 0, EVENT_ALL_ACCESS);
#else // defined(BOOST_ASIO_WINDOWS_APP)
events_[1] = ::CreateEventW(0, false, false, 0);
#endif // defined(BOOST_ASIO_WINDOWS_APP)
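The win_event fix matters because CreateEventExW's final argument is the desired access mask; under BOOST_ASIO_WINDOWS_APP a mask of 0 yields a handle that cannot be set or waited on. An isolated illustration (Windows-only):

#include <windows.h>

HANDLE make_manual_reset_event()
{
  // EVENT_ALL_ACCESS requests full rights on the new handle; with 0, the
  // later SetEvent/WaitForSingleObjectEx calls fail with access denied.
  HANDLE h = ::CreateEventExW(0, 0,
      CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
  return h; // caller checks for NULL and eventually calls CloseHandle
}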
diff --git a/boost/asio/detail/impl/win_iocp_handle_service.ipp b/boost/asio/detail/impl/win_iocp_handle_service.ipp
index ea513bc1dd..4f35a1c0a8 100644
--- a/boost/asio/detail/impl/win_iocp_handle_service.ipp
+++ b/boost/asio/detail/impl/win_iocp_handle_service.ipp
@@ -67,14 +67,15 @@ public:
};
win_iocp_handle_service::win_iocp_handle_service(
- boost::asio::io_service& io_service)
- : iocp_service_(boost::asio::use_service<win_iocp_io_service>(io_service)),
+ boost::asio::io_context& io_context)
+ : service_base<win_iocp_handle_service>(io_context),
+ iocp_service_(boost::asio::use_service<win_iocp_io_context>(io_context)),
mutex_(),
impl_list_(0)
{
}
-void win_iocp_handle_service::shutdown_service()
+void win_iocp_handle_service::shutdown()
{
// Close all implementations, causing all operations to complete.
boost::asio::detail::mutex::scoped_lock lock(mutex_);
@@ -200,7 +201,8 @@ boost::system::error_code win_iocp_handle_service::close(
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
+ &impl, reinterpret_cast<uintmax_t>(impl.handle_), "close"));
if (!::CloseHandle(impl.handle_))
{
@@ -234,7 +236,8 @@ boost::system::error_code win_iocp_handle_service::cancel(
return ec;
}
- BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "cancel"));
+ BOOST_ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
+ &impl, reinterpret_cast<uintmax_t>(impl.handle_), "cancel"));
if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
@@ -304,7 +307,7 @@ size_t win_iocp_handle_service::do_write(
}
// A request to write 0 bytes on a handle is a no-op.
- if (boost::asio::buffer_size(buffer) == 0)
+ if (buffer.size() == 0)
{
ec = boost::system::error_code();
return 0;
@@ -319,9 +322,8 @@ size_t win_iocp_handle_service::do_write(
// Write the data.
overlapped.Offset = offset & 0xFFFFFFFF;
overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
- BOOL ok = ::WriteFile(impl.handle_,
- boost::asio::buffer_cast<LPCVOID>(buffer),
- static_cast<DWORD>(boost::asio::buffer_size(buffer)), 0, &overlapped);
+ BOOL ok = ::WriteFile(impl.handle_, buffer.data(),
+ static_cast<DWORD>(buffer.size()), 0, &overlapped);
if (!ok)
{
DWORD last_error = ::GetLastError();
@@ -360,7 +362,7 @@ void win_iocp_handle_service::start_write_op(
{
iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
}
- else if (boost::asio::buffer_size(buffer) == 0)
+ else if (buffer.size() == 0)
{
// A request to write 0 bytes on a handle is a no-op.
iocp_service_.on_completion(op);
@@ -370,9 +372,8 @@ void win_iocp_handle_service::start_write_op(
DWORD bytes_transferred = 0;
op->Offset = offset & 0xFFFFFFFF;
op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
- BOOL ok = ::WriteFile(impl.handle_,
- boost::asio::buffer_cast<LPCVOID>(buffer),
- static_cast<DWORD>(boost::asio::buffer_size(buffer)),
+ BOOL ok = ::WriteFile(impl.handle_, buffer.data(),
+ static_cast<DWORD>(buffer.size()),
&bytes_transferred, op);
DWORD last_error = ::GetLastError();
if (!ok && last_error != ERROR_IO_PENDING
@@ -398,7 +399,7 @@ size_t win_iocp_handle_service::do_read(
}
// A request to read 0 bytes on a stream handle is a no-op.
- if (boost::asio::buffer_size(buffer) == 0)
+ if (buffer.size() == 0)
{
ec = boost::system::error_code();
return 0;
@@ -413,9 +414,8 @@ size_t win_iocp_handle_service::do_read(
// Read some data.
overlapped.Offset = offset & 0xFFFFFFFF;
overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
- BOOL ok = ::ReadFile(impl.handle_,
- boost::asio::buffer_cast<LPVOID>(buffer),
- static_cast<DWORD>(boost::asio::buffer_size(buffer)), 0, &overlapped);
+ BOOL ok = ::ReadFile(impl.handle_, buffer.data(),
+ static_cast<DWORD>(buffer.size()), 0, &overlapped);
if (!ok)
{
DWORD last_error = ::GetLastError();
@@ -468,7 +468,7 @@ void win_iocp_handle_service::start_read_op(
{
iocp_service_.on_completion(op, boost::asio::error::bad_descriptor);
}
- else if (boost::asio::buffer_size(buffer) == 0)
+ else if (buffer.size() == 0)
{
// A request to read 0 bytes on a handle is a no-op.
iocp_service_.on_completion(op);
@@ -478,9 +478,8 @@ void win_iocp_handle_service::start_read_op(
DWORD bytes_transferred = 0;
op->Offset = offset & 0xFFFFFFFF;
op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
- BOOL ok = ::ReadFile(impl.handle_,
- boost::asio::buffer_cast<LPVOID>(buffer),
- static_cast<DWORD>(boost::asio::buffer_size(buffer)),
+ BOOL ok = ::ReadFile(impl.handle_, buffer.data(),
+ static_cast<DWORD>(buffer.size()),
&bytes_transferred, op);
DWORD last_error = ::GetLastError();
if (!ok && last_error != ERROR_IO_PENDING
@@ -508,7 +507,8 @@ void win_iocp_handle_service::close_for_destruction(implementation_type& impl)
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
+ &impl, reinterpret_cast<uintmax_t>(impl.handle_), "close"));
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
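do_write()/do_read() above position each transfer by splitting a 64-bit offset across OVERLAPPED's two 32-bit fields. The same split in a stand-alone positioned read (Windows-only sketch; error handling trimmed):

#include <windows.h>

BOOL read_at(HANDLE h, unsigned long long offset,
    void* buf, DWORD size, DWORD* transferred)
{
  OVERLAPPED ov = {};
  ov.Offset = static_cast<DWORD>(offset & 0xFFFFFFFF);             // low 32
  ov.OffsetHigh = static_cast<DWORD>((offset >> 32) & 0xFFFFFFFF); // high 32
  return ::ReadFile(h, buf, size, transferred, &ov);
}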
diff --git a/boost/asio/detail/impl/win_iocp_io_service.hpp b/boost/asio/detail/impl/win_iocp_io_context.hpp
index e3505cb852..948b0c93d4 100644
--- a/boost/asio/detail/impl/win_iocp_io_service.hpp
+++ b/boost/asio/detail/impl/win_iocp_io_context.hpp
@@ -1,5 +1,5 @@
//
-// detail/impl/win_iocp_io_service.hpp
+// detail/impl/win_iocp_io_context.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
@@ -8,8 +8,8 @@
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
-#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP
-#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
@@ -19,11 +19,11 @@
#if defined(BOOST_ASIO_HAS_IOCP)
-#include <boost/asio/detail/addressof.hpp>
#include <boost/asio/detail/completion_handler.hpp>
#include <boost/asio/detail/fenced_block.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
+#include <boost/asio/detail/memory.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -31,62 +31,22 @@ namespace boost {
namespace asio {
namespace detail {
-template <typename Handler>
-void win_iocp_io_service::dispatch(Handler& handler)
-{
- if (thread_call_stack::contains(this))
- {
- fenced_block b(fenced_block::full);
- boost_asio_handler_invoke_helpers::invoke(handler, handler);
- }
- else
- {
- // Allocate and construct an operation to wrap the handler.
- typedef completion_handler<Handler> op;
- typename op::ptr p = { boost::asio::detail::addressof(handler),
- boost_asio_handler_alloc_helpers::allocate(
- sizeof(op), handler), 0 };
- p.p = new (p.v) op(handler);
-
- BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));
-
- post_immediate_completion(p.p, false);
- p.v = p.p = 0;
- }
-}
-
-template <typename Handler>
-void win_iocp_io_service::post(Handler& handler)
-{
- // Allocate and construct an operation to wrap the handler.
- typedef completion_handler<Handler> op;
- typename op::ptr p = { boost::asio::detail::addressof(handler),
- boost_asio_handler_alloc_helpers::allocate(
- sizeof(op), handler), 0 };
- p.p = new (p.v) op(handler);
-
- BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "post"));
-
- post_immediate_completion(p.p, false);
- p.v = p.p = 0;
-}
-
template <typename Time_Traits>
-void win_iocp_io_service::add_timer_queue(
+void win_iocp_io_context::add_timer_queue(
timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
template <typename Time_Traits>
-void win_iocp_io_service::remove_timer_queue(
+void win_iocp_io_context::remove_timer_queue(
timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
-void win_iocp_io_service::schedule_timer(timer_queue<Time_Traits>& queue,
+void win_iocp_io_context::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
@@ -106,7 +66,7 @@ void win_iocp_io_service::schedule_timer(timer_queue<Time_Traits>& queue,
}
template <typename Time_Traits>
-std::size_t win_iocp_io_service::cancel_timer(timer_queue<Time_Traits>& queue,
+std::size_t win_iocp_io_context::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
@@ -121,6 +81,19 @@ std::size_t win_iocp_io_service::cancel_timer(timer_queue<Time_Traits>& queue,
return n;
}
+template <typename Time_Traits>
+void win_iocp_io_context::move_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& to,
+ typename timer_queue<Time_Traits>::per_timer_data& from)
+{
+ boost::asio::detail::mutex::scoped_lock lock(dispatch_mutex_);
+ op_queue<operation> ops;
+ queue.cancel_timer(to, ops);
+ queue.move_timer(to, from);
+ lock.unlock();
+ post_deferred_completions(ops);
+}
+
} // namespace detail
} // namespace asio
} // namespace boost
@@ -129,4 +102,4 @@ std::size_t win_iocp_io_service::cancel_timer(timer_queue<Time_Traits>& queue,
#endif // defined(BOOST_ASIO_HAS_IOCP)
-#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
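move_timer() here also shows a rule used throughout these files: completions collected under dispatch_mutex_ are posted only after unlock, so user handlers never run while an internal mutex is held. The shape of that pattern, in a sketch independent of Asio's types:

#include <functional>
#include <mutex>
#include <vector>

std::mutex m;
std::vector<std::function<void()> > pending;

void drain_completions()
{
  std::vector<std::function<void()> > ready;
  std::unique_lock<std::mutex> lock(m);
  ready.swap(pending); // detach the work under the lock...
  lock.unlock();       // ...but run it outside, so handlers that re-enter
  for (auto& f : ready) // the service cannot deadlock on the same mutex
    f();
}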
diff --git a/boost/asio/detail/impl/win_iocp_io_service.ipp b/boost/asio/detail/impl/win_iocp_io_context.ipp
index a5bfcbbdea..9071836aa9 100644
--- a/boost/asio/detail/impl/win_iocp_io_service.ipp
+++ b/boost/asio/detail/impl/win_iocp_io_context.ipp
@@ -1,5 +1,5 @@
//
-// detail/impl/win_iocp_io_service.ipp
+// detail/impl/win_iocp_io_context.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
@@ -8,8 +8,8 @@
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
-#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP
-#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
@@ -20,13 +20,12 @@
#if defined(BOOST_ASIO_HAS_IOCP)
#include <boost/asio/error.hpp>
-#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/cstdint.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/throw_error.hpp>
-#include <boost/asio/detail/win_iocp_io_service.hpp>
+#include <boost/asio/detail/win_iocp_io_context.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -34,51 +33,51 @@ namespace boost {
namespace asio {
namespace detail {
-struct win_iocp_io_service::work_finished_on_block_exit
+struct win_iocp_io_context::work_finished_on_block_exit
{
~work_finished_on_block_exit()
{
- io_service_->work_finished();
+ io_context_->work_finished();
}
- win_iocp_io_service* io_service_;
+ win_iocp_io_context* io_context_;
};
-struct win_iocp_io_service::timer_thread_function
+struct win_iocp_io_context::timer_thread_function
{
void operator()()
{
- while (::InterlockedExchangeAdd(&io_service_->shutdown_, 0) == 0)
+ while (::InterlockedExchangeAdd(&io_context_->shutdown_, 0) == 0)
{
- if (::WaitForSingleObject(io_service_->waitable_timer_.handle,
+ if (::WaitForSingleObject(io_context_->waitable_timer_.handle,
INFINITE) == WAIT_OBJECT_0)
{
- ::InterlockedExchange(&io_service_->dispatch_required_, 1);
- ::PostQueuedCompletionStatus(io_service_->iocp_.handle,
+ ::InterlockedExchange(&io_context_->dispatch_required_, 1);
+ ::PostQueuedCompletionStatus(io_context_->iocp_.handle,
0, wake_for_dispatch, 0);
}
}
}
- win_iocp_io_service* io_service_;
+ win_iocp_io_context* io_context_;
};
-win_iocp_io_service::win_iocp_io_service(
- boost::asio::io_service& io_service, size_t concurrency_hint)
- : boost::asio::detail::service_base<win_iocp_io_service>(io_service),
+win_iocp_io_context::win_iocp_io_context(
+ boost::asio::execution_context& ctx, int concurrency_hint)
+ : execution_context_service_base<win_iocp_io_context>(ctx),
iocp_(),
outstanding_work_(0),
stopped_(0),
stop_event_posted_(0),
shutdown_(0),
gqcs_timeout_(get_gqcs_timeout()),
- dispatch_required_(0)
+ dispatch_required_(0),
+ concurrency_hint_(concurrency_hint)
{
BOOST_ASIO_HANDLER_TRACKING_INIT;
iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
- static_cast<DWORD>(concurrency_hint < DWORD(~0)
- ? concurrency_hint : DWORD(~0)));
+ static_cast<DWORD>(concurrency_hint >= 0 ? concurrency_hint : DWORD(~0)));
if (!iocp_.handle)
{
DWORD last_error = ::GetLastError();
@@ -88,7 +87,7 @@ win_iocp_io_service::win_iocp_io_service(
}
}
-void win_iocp_io_service::shutdown_service()
+void win_iocp_io_context::shutdown()
{
::InterlockedExchange(&shutdown_, 1);
@@ -132,7 +131,7 @@ void win_iocp_io_service::shutdown_service()
timer_thread_->join();
}
-boost::system::error_code win_iocp_io_service::register_handle(
+boost::system::error_code win_iocp_io_context::register_handle(
HANDLE handle, boost::system::error_code& ec)
{
if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0)
@@ -148,7 +147,7 @@ boost::system::error_code win_iocp_io_service::register_handle(
return ec;
}
-size_t win_iocp_io_service::run(boost::system::error_code& ec)
+size_t win_iocp_io_context::run(boost::system::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
@@ -161,13 +160,13 @@ size_t win_iocp_io_service::run(boost::system::error_code& ec)
thread_call_stack::context ctx(this, this_thread);
size_t n = 0;
- while (do_one(true, ec))
+ while (do_one(INFINITE, ec))
if (n != (std::numeric_limits<size_t>::max)())
++n;
return n;
}
-size_t win_iocp_io_service::run_one(boost::system::error_code& ec)
+size_t win_iocp_io_context::run_one(boost::system::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
@@ -179,10 +178,25 @@ size_t win_iocp_io_service::run_one(boost::system::error_code& ec)
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
- return do_one(true, ec);
+ return do_one(INFINITE, ec);
}
-size_t win_iocp_io_service::poll(boost::system::error_code& ec)
+size_t win_iocp_io_context::wait_one(long usec, boost::system::error_code& ec)
+{
+ if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+ {
+ stop();
+ ec = boost::system::error_code();
+ return 0;
+ }
+
+ win_iocp_thread_info this_thread;
+ thread_call_stack::context ctx(this, this_thread);
+
+ return do_one(usec < 0 ? INFINITE : ((usec - 1) / 1000 + 1), ec);
+}
+
+size_t win_iocp_io_context::poll(boost::system::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
@@ -195,13 +209,13 @@ size_t win_iocp_io_service::poll(boost::system::error_code& ec)
thread_call_stack::context ctx(this, this_thread);
size_t n = 0;
- while (do_one(false, ec))
+ while (do_one(0, ec))
if (n != (std::numeric_limits<size_t>::max)())
++n;
return n;
}
-size_t win_iocp_io_service::poll_one(boost::system::error_code& ec)
+size_t win_iocp_io_context::poll_one(boost::system::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
@@ -213,10 +227,10 @@ size_t win_iocp_io_service::poll_one(boost::system::error_code& ec)
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
- return do_one(false, ec);
+ return do_one(0, ec);
}
-void win_iocp_io_service::stop()
+void win_iocp_io_context::stop()
{
if (::InterlockedExchange(&stopped_, 1) == 0)
{
@@ -233,7 +247,7 @@ void win_iocp_io_service::stop()
}
}
-void win_iocp_io_service::post_deferred_completion(win_iocp_operation* op)
+void win_iocp_io_context::post_deferred_completion(win_iocp_operation* op)
{
// Flag the operation as ready.
op->ready_ = 1;
@@ -248,7 +262,7 @@ void win_iocp_io_service::post_deferred_completion(win_iocp_operation* op)
}
}
-void win_iocp_io_service::post_deferred_completions(
+void win_iocp_io_context::post_deferred_completions(
op_queue<win_iocp_operation>& ops)
{
while (win_iocp_operation* op = ops.front())
@@ -270,7 +284,7 @@ void win_iocp_io_service::post_deferred_completions(
}
}
-void win_iocp_io_service::abandon_operations(
+void win_iocp_io_context::abandon_operations(
op_queue<win_iocp_operation>& ops)
{
while (win_iocp_operation* op = ops.front())
@@ -281,7 +295,7 @@ void win_iocp_io_service::abandon_operations(
}
}
-void win_iocp_io_service::on_pending(win_iocp_operation* op)
+void win_iocp_io_context::on_pending(win_iocp_operation* op)
{
if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
{
@@ -297,7 +311,7 @@ void win_iocp_io_service::on_pending(win_iocp_operation* op)
}
}
-void win_iocp_io_service::on_completion(win_iocp_operation* op,
+void win_iocp_io_context::on_completion(win_iocp_operation* op,
DWORD last_error, DWORD bytes_transferred)
{
// Flag that the operation is ready for invocation.
@@ -320,7 +334,7 @@ void win_iocp_io_service::on_completion(win_iocp_operation* op,
}
}
-void win_iocp_io_service::on_completion(win_iocp_operation* op,
+void win_iocp_io_context::on_completion(win_iocp_operation* op,
const boost::system::error_code& ec, DWORD bytes_transferred)
{
// Flag that the operation is ready for invocation.
@@ -342,7 +356,7 @@ void win_iocp_io_service::on_completion(win_iocp_operation* op,
}
}
-size_t win_iocp_io_service::do_one(bool block, boost::system::error_code& ec)
+size_t win_iocp_io_context::do_one(DWORD msec, boost::system::error_code& ec)
{
for (;;)
{
@@ -364,8 +378,9 @@ size_t win_iocp_io_service::do_one(bool block, boost::system::error_code& ec)
dword_ptr_t completion_key = 0;
LPOVERLAPPED overlapped = 0;
::SetLastError(0);
- BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
- &completion_key, &overlapped, block ? gqcs_timeout_ : 0);
+ BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle,
+ &bytes_transferred, &completion_key, &overlapped,
+ msec < gqcs_timeout_ ? msec : gqcs_timeout_);
DWORD last_error = ::GetLastError();
if (overlapped)
@@ -402,7 +417,7 @@ size_t win_iocp_io_service::do_one(bool block, boost::system::error_code& ec)
work_finished_on_block_exit on_exit = { this };
(void)on_exit;
- op->complete(*this, result_ec, bytes_transferred);
+ op->complete(this, result_ec, bytes_transferred);
ec = boost::system::error_code();
return 1;
}
@@ -416,8 +431,9 @@ size_t win_iocp_io_service::do_one(bool block, boost::system::error_code& ec)
return 0;
}
- // If we're not polling we need to keep going until we get a real handler.
- if (block)
+ // If we're waiting indefinitely we need to keep going until we get a
+ // real handler.
+ if (msec == INFINITE)
continue;
ec = boost::system::error_code();
@@ -456,7 +472,7 @@ size_t win_iocp_io_service::do_one(bool block, boost::system::error_code& ec)
}
}
-DWORD win_iocp_io_service::get_gqcs_timeout()
+DWORD win_iocp_io_context::get_gqcs_timeout()
{
OSVERSIONINFOEX osvi;
ZeroMemory(&osvi, sizeof(osvi));
@@ -472,7 +488,7 @@ DWORD win_iocp_io_service::get_gqcs_timeout()
return default_gqcs_timeout;
}
-void win_iocp_io_service::do_add_timer_queue(timer_queue_base& queue)
+void win_iocp_io_context::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(dispatch_mutex_);
@@ -503,14 +519,14 @@ void win_iocp_io_service::do_add_timer_queue(timer_queue_base& queue)
}
}
-void win_iocp_io_service::do_remove_timer_queue(timer_queue_base& queue)
+void win_iocp_io_context::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(dispatch_mutex_);
timer_queues_.erase(&queue);
}
-void win_iocp_io_service::update_timeout()
+void win_iocp_io_context::update_timeout()
{
if (timer_thread_.get())
{
@@ -537,4 +553,4 @@ void win_iocp_io_service::update_timeout()
#endif // defined(BOOST_ASIO_HAS_IOCP)
-#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
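Replacing do_one(bool) with do_one(DWORD msec) lets run(), poll() and the new wait_one() share one dequeue loop: INFINITE, 0, or a clamped timeout respectively. A bare-bones GetQueuedCompletionStatus call in the same shape (Windows-only; no operation dispatch):

#include <windows.h>

bool dequeue_one(HANDLE iocp, DWORD msec)
{
  DWORD bytes = 0;
  ULONG_PTR key = 0;
  LPOVERLAPPED ov = 0;
  BOOL ok = ::GetQueuedCompletionStatus(iocp, &bytes, &key, &ov, msec);
  if (ov)
    return true;  // a real loop downcasts ov to its operation type and runs it
  if (!ok && ::GetLastError() == WAIT_TIMEOUT)
    return false; // timed out: this is where poll()/wait_one() give up
  return false;   // spurious wake-up or stop signal
}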
diff --git a/boost/asio/detail/impl/win_iocp_serial_port_service.ipp b/boost/asio/detail/impl/win_iocp_serial_port_service.ipp
index 32ba34684b..c0d2dbbd59 100644
--- a/boost/asio/detail/impl/win_iocp_serial_port_service.ipp
+++ b/boost/asio/detail/impl/win_iocp_serial_port_service.ipp
@@ -30,12 +30,13 @@ namespace asio {
namespace detail {
win_iocp_serial_port_service::win_iocp_serial_port_service(
- boost::asio::io_service& io_service)
- : handle_service_(io_service)
+ boost::asio::io_context& io_context)
+ : service_base<win_iocp_serial_port_service>(io_context),
+ handle_service_(io_context)
{
}
-void win_iocp_serial_port_service::shutdown_service()
+void win_iocp_serial_port_service::shutdown()
{
}
diff --git a/boost/asio/detail/impl/win_iocp_socket_service_base.ipp b/boost/asio/detail/impl/win_iocp_socket_service_base.ipp
index 93f5ed0713..e9a5049345 100644
--- a/boost/asio/detail/impl/win_iocp_socket_service_base.ipp
+++ b/boost/asio/detail/impl/win_iocp_socket_service_base.ipp
@@ -28,24 +28,24 @@ namespace asio {
namespace detail {
win_iocp_socket_service_base::win_iocp_socket_service_base(
- boost::asio::io_service& io_service)
- : io_service_(io_service),
- iocp_service_(use_service<win_iocp_io_service>(io_service)),
+ boost::asio::io_context& io_context)
+ : io_context_(io_context),
+ iocp_service_(use_service<win_iocp_io_context>(io_context)),
reactor_(0),
connect_ex_(0),
+ nt_set_info_(0),
mutex_(),
impl_list_(0)
{
}
-void win_iocp_socket_service_base::shutdown_service()
+void win_iocp_socket_service_base::base_shutdown()
{
// Close all implementations, causing all operations to complete.
boost::asio::detail::mutex::scoped_lock lock(mutex_);
base_implementation_type* impl = impl_list_;
while (impl)
{
- boost::system::error_code ignored_ec;
close_for_destruction(*impl);
impl = impl->next_;
}
@@ -167,12 +167,13 @@ boost::system::error_code win_iocp_socket_service_base::close(
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((iocp_service_.context(),
+ "socket", &impl, impl.socket_, "close"));
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
- reactor* r = static_cast<reactor*>(
+ select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
@@ -198,6 +199,39 @@ boost::system::error_code win_iocp_socket_service_base::close(
return ec;
}
+socket_type win_iocp_socket_service_base::release(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ return invalid_socket;
+
+ cancel(impl, ec);
+ if (ec)
+ return invalid_socket;
+
+ nt_set_info_fn fn = get_nt_set_info();
+ if (fn == 0)
+ {
+ ec = boost::asio::error::operation_not_supported;
+ return invalid_socket;
+ }
+
+ HANDLE sock_as_handle = reinterpret_cast<HANDLE>(impl.socket_);
+ ULONG_PTR iosb[2] = { 0, 0 };
+ void* info[2] = { 0, 0 };
+ if (fn(sock_as_handle, iosb, &info, sizeof(info),
+ 61 /* FileReplaceCompletionInformation */))
+ {
+ ec = boost::asio::error::operation_not_supported;
+ return invalid_socket;
+ }
+
+ socket_type tmp = impl.socket_;
+ impl.socket_ = invalid_socket;
+ return tmp;
+}
+
boost::system::error_code win_iocp_socket_service_base::cancel(
win_iocp_socket_service_base::base_implementation_type& impl,
boost::system::error_code& ec)
@@ -208,7 +242,8 @@ boost::system::error_code win_iocp_socket_service_base::cancel(
return ec;
}
- BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "cancel"));
+ BOOST_ASIO_HANDLER_OPERATION((iocp_service_.context(),
+ "socket", &impl, impl.socket_, "cancel"));
if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
@@ -279,7 +314,7 @@ boost::system::error_code win_iocp_socket_service_base::cancel(
// Cancel any operations started via the reactor.
if (!ec)
{
- reactor* r = static_cast<reactor*>(
+ select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
@@ -445,7 +480,7 @@ void win_iocp_socket_service_base::start_null_buffers_receive_op(
{
start_reactor_op(impl,
(flags & socket_base::message_out_of_band)
- ? reactor::except_op : reactor::read_op,
+ ? select_reactor::except_op : select_reactor::read_op,
op);
}
}
@@ -537,7 +572,7 @@ void win_iocp_socket_service_base::start_reactor_op(
win_iocp_socket_service_base::base_implementation_type& impl,
int op_type, reactor_op* op)
{
- reactor& r = get_reactor();
+ select_reactor& r = get_reactor();
update_cancellation_thread_id(impl);
if (is_open(impl))
@@ -598,7 +633,7 @@ void win_iocp_socket_service_base::start_connect_op(
}
// Otherwise, fall back to a reactor-based implementation.
- reactor& r = get_reactor();
+ select_reactor& r = get_reactor();
update_cancellation_thread_id(impl);
if ((impl.state_ & socket_ops::non_blocking) != 0
@@ -611,7 +646,7 @@ void win_iocp_socket_service_base::start_connect_op(
|| op->ec_ == boost::asio::error::would_block)
{
op->ec_ = boost::system::error_code();
- r.start_op(reactor::connect_op, impl.socket_,
+ r.start_op(select_reactor::connect_op, impl.socket_,
impl.reactor_data_, op, false, false);
return;
}
@@ -626,12 +661,13 @@ void win_iocp_socket_service_base::close_for_destruction(
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((iocp_service_.context(),
+ "socket", &impl, impl.socket_, "close"));
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
- reactor* r = static_cast<reactor*>(
+ select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
@@ -665,14 +701,14 @@ void win_iocp_socket_service_base::update_cancellation_thread_id(
#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
}
-reactor& win_iocp_socket_service_base::get_reactor()
+select_reactor& win_iocp_socket_service_base::get_reactor()
{
- reactor* r = static_cast<reactor*>(
+ select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (!r)
{
- r = &(use_service<reactor>(io_service_));
+ r = &(use_service<select_reactor>(io_context_));
interlocked_exchange_pointer(reinterpret_cast<void**>(&reactor_), r);
}
return *r;
@@ -713,6 +749,24 @@ win_iocp_socket_service_base::get_connect_ex(
#endif // defined(BOOST_ASIO_DISABLE_CONNECTEX)
}
+win_iocp_socket_service_base::nt_set_info_fn
+win_iocp_socket_service_base::get_nt_set_info()
+{
+ void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0);
+ if (!ptr)
+ {
+ if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL"))
+ ptr = reinterpret_cast<void*>(GetProcAddress(h, "NtSetInformationFile"));
+
+ // On failure, set nt_set_info_ to a special value to indicate that the
+ // NtSetInformationFile function is unavailable. That way we won't bother
+ // trying to look it up again.
+ interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this);
+ }
+
+ return reinterpret_cast<nt_set_info_fn>(ptr == this ? 0 : ptr);
+}
+
void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer(
void** dest, void* exch, void* cmp)
{
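get_nt_set_info() above caches the GetProcAddress lookup and uses the service's own 'this' pointer as a sentinel for "looked up, not found", so a failed lookup is never repeated. A single-threaded sketch of the idea (the interlocked calls in the real code make first use thread-safe):

#include <windows.h>

typedef LONG (NTAPI* nt_set_info_fn)(HANDLE, ULONG_PTR*, void*, ULONG, ULONG);

nt_set_info_fn lookup_nt_set_info(void*& cache, void* sentinel)
{
  if (!cache)
  {
    void* p = 0;
    if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL"))
      p = reinterpret_cast<void*>(
          ::GetProcAddress(h, "NtSetInformationFile"));
    cache = p ? p : sentinel; // remember failure too
  }
  return cache == sentinel ? 0 : reinterpret_cast<nt_set_info_fn>(cache);
}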
diff --git a/boost/asio/detail/impl/win_object_handle_service.ipp b/boost/asio/detail/impl/win_object_handle_service.ipp
index a940161cba..31718a012b 100644
--- a/boost/asio/detail/impl/win_object_handle_service.ipp
+++ b/boost/asio/detail/impl/win_object_handle_service.ipp
@@ -29,15 +29,16 @@ namespace asio {
namespace detail {
win_object_handle_service::win_object_handle_service(
- boost::asio::io_service& io_service)
- : io_service_(boost::asio::use_service<io_service_impl>(io_service)),
+ boost::asio::io_context& io_context)
+ : service_base<win_object_handle_service>(io_context),
+ io_context_(boost::asio::use_service<io_context_impl>(io_context)),
mutex_(),
impl_list_(0),
shutdown_(false)
{
}
-void win_object_handle_service::shutdown_service()
+void win_object_handle_service::shutdown()
{
mutex::scoped_lock lock(mutex_);
@@ -52,7 +53,7 @@ void win_object_handle_service::shutdown_service()
lock.unlock();
- io_service_.abandon_operations(ops);
+ io_context_.abandon_operations(ops);
}
void win_object_handle_service::construct(
@@ -178,7 +179,8 @@ void win_object_handle_service::destroy(
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("object_handle", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((io_context_.context(), "object_handle",
+ &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close"));
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
@@ -202,7 +204,7 @@ void win_object_handle_service::destroy(
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
- io_service_.post_deferred_completions(ops);
+ io_context_.post_deferred_completions(ops);
}
}
@@ -227,7 +229,8 @@ boost::system::error_code win_object_handle_service::close(
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("object_handle", &impl, "close"));
+ BOOST_ASIO_HANDLER_OPERATION((io_context_.context(), "object_handle",
+ &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close"));
mutex::scoped_lock lock(mutex_);
@@ -262,7 +265,7 @@ boost::system::error_code win_object_handle_service::close(
boost::asio::error::get_system_category());
}
- io_service_.post_deferred_completions(completed_ops);
+ io_context_.post_deferred_completions(completed_ops);
}
else
{
@@ -278,7 +281,8 @@ boost::system::error_code win_object_handle_service::cancel(
{
if (is_open(impl))
{
- BOOST_ASIO_HANDLER_OPERATION(("object_handle", &impl, "cancel"));
+ BOOST_ASIO_HANDLER_OPERATION((io_context_.context(), "object_handle",
+ &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "cancel"));
mutex::scoped_lock lock(mutex_);
@@ -303,7 +307,7 @@ boost::system::error_code win_object_handle_service::cancel(
ec = boost::system::error_code();
- io_service_.post_deferred_completions(completed_ops);
+ io_context_.post_deferred_completions(completed_ops);
}
else
{
@@ -337,7 +341,7 @@ void win_object_handle_service::wait(
void win_object_handle_service::start_wait_op(
win_object_handle_service::implementation_type& impl, wait_op* op)
{
- io_service_.work_started();
+ io_context_.work_started();
if (is_open(impl))
{
@@ -355,13 +359,13 @@ void win_object_handle_service::start_wait_op(
else
{
lock.unlock();
- io_service_.post_deferred_completion(op);
+ io_context_.post_deferred_completion(op);
}
}
else
{
op->ec_ = boost::asio::error::bad_descriptor;
- io_service_.post_deferred_completion(op);
+ io_context_.post_deferred_completion(op);
}
}
@@ -388,7 +392,7 @@ void win_object_handle_service::register_wait_callback(
}
lock.unlock();
- io_service_.post_deferred_completions(completed_ops);
+ io_context_.post_deferred_completions(completed_ops);
}
}
@@ -430,9 +434,9 @@ void win_object_handle_service::wait_callback(PVOID param, BOOLEAN)
}
}
- io_service_impl& ios = impl->owner_->io_service_;
+ io_context_impl& ioc = impl->owner_->io_context_;
lock.unlock();
- ios.post_deferred_completions(completed_ops);
+ ioc.post_deferred_completions(completed_ops);
}
}
diff --git a/boost/asio/detail/impl/win_thread.ipp b/boost/asio/detail/impl/win_thread.ipp
index e2d9384007..c90c3f3986 100644
--- a/boost/asio/detail/impl/win_thread.ipp
+++ b/boost/asio/detail/impl/win_thread.ipp
@@ -56,6 +56,13 @@ void win_thread::join()
}
}
+std::size_t win_thread::hardware_concurrency()
+{
+ SYSTEM_INFO system_info;
+ ::GetSystemInfo(&system_info);
+ return system_info.dwNumberOfProcessors;
+}
+
void win_thread::start_thread(func_base* arg, unsigned int stack_size)
{
::HANDLE entry_event = 0;
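The new win_thread::hardware_concurrency() gives Asio a processor count on Windows builds that avoid the standard <thread> header. Outside Asio the portable equivalent is simply:

#include <thread>

unsigned int n = std::thread::hardware_concurrency(); // 0 if indeterminate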
diff --git a/boost/asio/detail/impl/win_tss_ptr.ipp b/boost/asio/detail/impl/win_tss_ptr.ipp
index 3390066695..105cf3022c 100644
--- a/boost/asio/detail/impl/win_tss_ptr.ipp
+++ b/boost/asio/detail/impl/win_tss_ptr.ipp
@@ -32,9 +32,9 @@ namespace detail {
DWORD win_tss_ptr_create()
{
#if defined(UNDER_CE)
- enum { out_of_indexes = 0xFFFFFFFF };
+ const DWORD out_of_indexes = 0xFFFFFFFF;
#else
- enum { out_of_indexes = TLS_OUT_OF_INDEXES };
+ const DWORD out_of_indexes = TLS_OUT_OF_INDEXES;
#endif
DWORD tss_key = ::TlsAlloc();
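The enum-to-const change keeps the failure comparison in DWORD: TLS_OUT_OF_INDEXES is 0xFFFFFFFF, and an enum constant of that value can promote to a different type and draw signed/unsigned warnings. The allocation check itself, in isolation:

#include <windows.h>

bool allocate_tls_slot(DWORD& key)
{
  key = ::TlsAlloc();
  return key != TLS_OUT_OF_INDEXES; // DWORD compared with DWORD: no promotion
}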
diff --git a/boost/asio/detail/impl/winrt_ssocket_service_base.ipp b/boost/asio/detail/impl/winrt_ssocket_service_base.ipp
index f5990a97ad..a7a340b70b 100644
--- a/boost/asio/detail/impl/winrt_ssocket_service_base.ipp
+++ b/boost/asio/detail/impl/winrt_ssocket_service_base.ipp
@@ -31,15 +31,15 @@ namespace asio {
namespace detail {
winrt_ssocket_service_base::winrt_ssocket_service_base(
- boost::asio::io_service& io_service)
- : io_service_(use_service<io_service_impl>(io_service)),
- async_manager_(use_service<winrt_async_manager>(io_service)),
+ boost::asio::io_context& io_context)
+ : io_context_(use_service<io_context_impl>(io_context)),
+ async_manager_(use_service<winrt_async_manager>(io_context)),
mutex_(),
impl_list_(0)
{
}
-void winrt_ssocket_service_base::shutdown_service()
+void winrt_ssocket_service_base::base_shutdown()
{
// Close all implementations, causing all operations to complete.
boost::asio::detail::mutex::scoped_lock lock(mutex_);
@@ -149,6 +149,23 @@ boost::system::error_code winrt_ssocket_service_base::close(
return ec;
}
+winrt_ssocket_service_base::native_handle_type
+winrt_ssocket_service_base::release(
+ winrt_ssocket_service_base::base_implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ if (!is_open(impl))
+ return nullptr;
+
+ cancel(impl, ec);
+ if (ec)
+ return nullptr;
+
+ native_handle_type tmp = impl.socket_;
+ impl.socket_ = nullptr;
+ return tmp;
+}
+
std::size_t winrt_ssocket_service_base::do_get_endpoint(
const base_implementation_type& impl, bool local,
void* addr, std::size_t addr_len, boost::system::error_code& ec) const
@@ -382,7 +399,7 @@ void winrt_ssocket_service_base::start_connect_op(
if (!is_open(impl))
{
op->ec_ = boost::asio::error::bad_descriptor;
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
return;
}
@@ -411,7 +428,7 @@ void winrt_ssocket_service_base::start_connect_op(
if (op->ec_)
{
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
return;
}
@@ -426,7 +443,7 @@ void winrt_ssocket_service_base::start_connect_op(
{
op->ec_ = boost::system::error_code(
e->HResult, boost::system::system_category());
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
}
}
@@ -450,7 +467,7 @@ std::size_t winrt_ssocket_service_base::do_send(
try
{
buffer_sequence_adapter<boost::asio::const_buffer,
- boost::asio::const_buffers_1> bufs(boost::asio::buffer(data));
+ boost::asio::const_buffer> bufs(boost::asio::buffer(data));
if (bufs.all_empty())
{
@@ -477,25 +494,25 @@ void winrt_ssocket_service_base::start_send_op(
if (flags)
{
op->ec_ = boost::asio::error::operation_not_supported;
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
return;
}
if (!is_open(impl))
{
op->ec_ = boost::asio::error::bad_descriptor;
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
return;
}
try
{
buffer_sequence_adapter<boost::asio::const_buffer,
- boost::asio::const_buffers_1> bufs(boost::asio::buffer(data));
+ boost::asio::const_buffer> bufs(boost::asio::buffer(data));
if (bufs.all_empty())
{
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
return;
}
@@ -506,7 +523,7 @@ void winrt_ssocket_service_base::start_send_op(
{
op->ec_ = boost::system::error_code(e->HResult,
boost::system::system_category());
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
}
}
@@ -530,7 +547,7 @@ std::size_t winrt_ssocket_service_base::do_receive(
try
{
buffer_sequence_adapter<boost::asio::mutable_buffer,
- boost::asio::mutable_buffers_1> bufs(boost::asio::buffer(data));
+ boost::asio::mutable_buffer> bufs(boost::asio::buffer(data));
if (bufs.all_empty())
{
@@ -568,25 +585,25 @@ void winrt_ssocket_service_base::start_receive_op(
if (flags)
{
op->ec_ = boost::asio::error::operation_not_supported;
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
return;
}
if (!is_open(impl))
{
op->ec_ = boost::asio::error::bad_descriptor;
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
return;
}
try
{
buffer_sequence_adapter<boost::asio::mutable_buffer,
- boost::asio::mutable_buffers_1> bufs(boost::asio::buffer(data));
+ boost::asio::mutable_buffer> bufs(boost::asio::buffer(data));
if (bufs.all_empty())
{
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
return;
}
@@ -599,7 +616,7 @@ void winrt_ssocket_service_base::start_receive_op(
{
op->ec_ = boost::system::error_code(e->HResult,
boost::system::system_category());
- io_service_.post_immediate_completion(op, is_continuation);
+ io_context_.post_immediate_completion(op, is_continuation);
}
}
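The const_buffers_1/mutable_buffers_1 adaptors disappear here because 1.66 adopts the networking-TS buffer model, in which a single const_buffer or mutable_buffer is itself a valid one-element buffer sequence. A minimal sketch:

#include <boost/asio/buffer.hpp>

void buffer_model_sketch()
{
  char data[16];
  // In 1.66, buffer() returns mutable_buffer directly, and that object is
  // usable anywhere a MutableBufferSequence is expected.
  boost::asio::mutable_buffer b = boost::asio::buffer(data);
  (void)b;
}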
diff --git a/boost/asio/detail/impl/winrt_timer_scheduler.hpp b/boost/asio/detail/impl/winrt_timer_scheduler.hpp
index 8d93e57962..b1522b4673 100644
--- a/boost/asio/detail/impl/winrt_timer_scheduler.hpp
+++ b/boost/asio/detail/impl/winrt_timer_scheduler.hpp
@@ -47,12 +47,12 @@ void winrt_timer_scheduler::schedule_timer(timer_queue<Time_Traits>& queue,
if (shutdown_)
{
- io_service_.post_immediate_completion(op, false);
+ io_context_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
- io_service_.work_started();
+ io_context_.work_started();
if (earliest)
event_.signal(lock);
}
@@ -66,10 +66,23 @@ std::size_t winrt_timer_scheduler::cancel_timer(timer_queue<Time_Traits>& queue,
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
- io_service_.post_deferred_completions(ops);
+ io_context_.post_deferred_completions(ops);
return n;
}
+template <typename Time_Traits>
+void winrt_timer_scheduler::move_timer(timer_queue<Time_Traits>& queue,
+ typename timer_queue<Time_Traits>::per_timer_data& to,
+ typename timer_queue<Time_Traits>::per_timer_data& from)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ queue.cancel_timer(to, ops);
+ queue.move_timer(to, from);
+ lock.unlock();
+  io_context_.post_deferred_completions(ops);
+}
+
} // namespace detail
} // namespace asio
} // namespace boost
diff --git a/boost/asio/detail/impl/winrt_timer_scheduler.ipp b/boost/asio/detail/impl/winrt_timer_scheduler.ipp
index 914849d822..2e8145b6b8 100644
--- a/boost/asio/detail/impl/winrt_timer_scheduler.ipp
+++ b/boost/asio/detail/impl/winrt_timer_scheduler.ipp
@@ -29,9 +29,9 @@ namespace asio {
namespace detail {
winrt_timer_scheduler::winrt_timer_scheduler(
- boost::asio::io_service& io_service)
- : boost::asio::detail::service_base<winrt_timer_scheduler>(io_service),
- io_service_(use_service<io_service_impl>(io_service)),
+ boost::asio::io_context& io_context)
+ : boost::asio::detail::service_base<winrt_timer_scheduler>(io_context),
+ io_context_(use_service<io_context_impl>(io_context)),
mutex_(),
event_(),
timer_queues_(),
@@ -45,10 +45,10 @@ winrt_timer_scheduler::winrt_timer_scheduler(
winrt_timer_scheduler::~winrt_timer_scheduler()
{
- shutdown_service();
+ shutdown();
}
-void winrt_timer_scheduler::shutdown_service()
+void winrt_timer_scheduler::shutdown()
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
@@ -65,10 +65,10 @@ void winrt_timer_scheduler::shutdown_service()
op_queue<operation> ops;
timer_queues_.get_all_timers(ops);
- io_service_.abandon_operations(ops);
+ io_context_.abandon_operations(ops);
}
-void winrt_timer_scheduler::fork_service(boost::asio::io_service::fork_event)
+void winrt_timer_scheduler::notify_fork(boost::asio::io_context::fork_event)
{
}
@@ -90,7 +90,7 @@ void winrt_timer_scheduler::run_thread()
if (!ops.empty())
{
lock.unlock();
- io_service_.post_deferred_completions(ops);
+ io_context_.post_deferred_completions(ops);
lock.lock();
}
}